#include "SMCalibrationWorker.h"
#include "SMCalibrationParameters.h"

#include "cvtools.h"

#include <QSettings>

#include <ceres/ceres.h>

#include <cassert>
#include <cfloat>
#include <cmath>
#include <iostream>
#include <iterator>
#include <numeric>

struct CircleResidual {
    CircleResidual(std::vector<cv::Point3f> _pointsOnArc)
        : pointsOnArc(_pointsOnArc) {}

    template <typename T>
    bool operator()(const T* point, const T* axis, T* residual) const {

        T axisSqNorm = axis[0]*axis[0] + axis[1]*axis[1] + axis[2]*axis[2];

        unsigned int l = pointsOnArc.size();
        std::vector<T> dI(l);

        // note, this is automatically initialized to 0
        T sum(0.0);

        for(unsigned int i=0; i<l; i++){
            cv::Point3d p = pointsOnArc[i];
            //T p[3] = {pointsOnArc[i].x, pointsOnArc[i].y, pointsOnArc[i].z};

            // point to line distance
            T dotProd = (point[0]-p.x)*axis[0] + (point[1]-p.y)*axis[1] + (point[2]-p.z)*axis[2];
            T dIx = point[0] - p.x - (dotProd*axis[0]/axisSqNorm);
            T dIy = point[1] - p.y - (dotProd*axis[1]/axisSqNorm);
            T dIz = point[2] - p.z - (dotProd*axis[2]/axisSqNorm);
            dI[i] = ceres::sqrt(dIx*dIx + dIy*dIy + dIz*dIz);

            sum += dI[i];
        }

        T mean = sum / double(l);

        for(unsigned int i=0; i<l; i++){
            residual[i] = dI[i] - mean;
        }

        return true;
    }

private:
    // Observations for one checkerboard corner.
    const std::vector<cv::Point3f> pointsOnArc;
};
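
// Note on the residual: for one corner we compute its distance to the candidate axis in
// every frame and subtract the mean of these distances. This removes the unknown circle
// radius from the problem, leaving only the axis point and axis direction (two 3-vectors)
// as parameters; points lying on a perfect circular arc around the axis give zero residuals.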


// Closed form solution for the rotation axis from sets of 3D point coordinates of flat pattern feature points.
// Algorithm according to Chen et al., Rotation axis calibration of a turntable using constrained global optimization, Optik 2014
// DTU, 2014, Jakob Wilm
static void rotationAxisCalibration(const std::vector< std::vector<cv::Point3f> > Qcam,
                                    const std::vector<cv::Point3f> Qobj,
                                    cv::Vec3f &axis, cv::Vec3f &point, float &error){
    assert(Qobj.size() == Qcam[0].size());

    // number of frames (points on each arc)
    int l = Qcam.size();

    // number of points in each frame
    size_t mn = Qobj.size();

    // construct matrix for axis determination
    cv::Mat M(6, 6, CV_32F, cv::Scalar(0));

    for(int k=0; k<l; k++){
        for(unsigned int idx=0; idx<mn; idx++){

            // float i = Qobj[idx].x+4;
            // float j = Qobj[idx].y+4;
            float i = Qobj[idx].x;
            float j = Qobj[idx].y;

            float x = Qcam[k][idx].x;
            float y = Qcam[k][idx].y;
            float z = Qcam[k][idx].z;

            M += (cv::Mat_<float>(6,6) << x*x, x*y, x*z, x, i*x, j*x,
                                            0, y*y, y*z, y, i*y, j*y,
                                            0,   0, z*z, z, i*z, j*z,
                                            0,   0,   0, 1,   i,   j,
                                            0,   0,   0, 0, i*i, i*j,
                                            0,   0,   0, 0,   0, j*j);

        }
    }

    cv::completeSymm(M, false);
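
    // What is being fit here (as read from the code): each accumulated outer product uses
    // v = (x, y, z, 1, i, j), so M = sum(v * v^T), and the 6-vector n = (nx, ny, nz, d, dh, dv)
    // minimizing n^T * M * n (i.e. sum((v . n)^2)) corresponds to the model
    //     nx*x + ny*y + nz*z + d + dh*i + dv*j = 0,
    // meaning every triangulated corner lies on a plane orthogonal to the rotation axis whose
    // offset varies linearly with the pattern coordinates (i, j). The homogeneous least-squares
    // solution is the eigenvector of M belonging to the smallest eigenvalue, found below.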

    // solve for axis
    std::vector<float> lambda;
    cv::Mat u;
    cv::eigen(M, lambda, u);

    float minLambda = std::abs(lambda[0]);
    int idx = 0;
    for(unsigned int i=1; i<lambda.size(); i++){
        if(std::abs(lambda[i]) < minLambda){
            minLambda = std::abs(lambda[i]);
            idx = i;
        }
    }

    axis = u.row(idx).colRange(0, 3);
    axis = cv::normalize(axis);

    float nx = u.at<float>(idx, 0);
    float ny = u.at<float>(idx, 1);
    float nz = u.at<float>(idx, 2);
    //float d = u.at<float>(idx, 3);
    float dh = u.at<float>(idx, 4);
    float dv = u.at<float>(idx, 5);

    // Paper version: c is initially eliminated
    /*cv::Mat A(l*mn, mn+2, CV_32F, cv::Scalar(0.0));
    cv::Mat bb(l*mn, 1, CV_32F);

    for(int k=0; k<l; k++){
        for(unsigned int idx=0; idx<mn; idx++){

            float i = Qobj[idx].x;
            float j = Qobj[idx].y;

            float x = Qcam[k][idx].x;
            float y = Qcam[k][idx].y;
            float z = Qcam[k][idx].z;

            float f = x*x + y*y + z*z + (2*x*nx + 2*y*ny + 2*z*nz)*(i*dh + j*dv);

            int row = k*mn+idx;
            A.at<float>(row, 0) = 2*x - (2*z*nx)/nz;
            A.at<float>(row, 1) = 2*y - (2*z*ny)/nz;
            A.at<float>(row, idx+2) = 1.0;

            bb.at<float>(row, 0) = f + (2*z*d)/nz;
        }
    }

    // solve for point
    cv::Mat abe;
    cv::solve(A, bb, abe, cv::DECOMP_SVD);

    float a = abe.at<float>(0, 0);
    float b = abe.at<float>(1, 0);
    float c = -(nx*a+ny*b+d)/nz;
    */

    // Our version: solve simultaneously for a, b, c
    cv::Mat A(l*mn, mn+3, CV_32F, cv::Scalar(0.0));
    cv::Mat bb(l*mn, 1, CV_32F);

    for(int k=0; k<l; k++){
        for(unsigned int idx=0; idx<mn; idx++){

            float i = Qobj[idx].x;
            float j = Qobj[idx].y;

            float x = Qcam[k][idx].x;
            float y = Qcam[k][idx].y;
            float z = Qcam[k][idx].z;

            float f = x*x + y*y + z*z + (2*x*nx + 2*y*ny + 2*z*nz)*(i*dh + j*dv);

            int row = k*mn+idx;
            A.at<float>(row, 0) = 2*x;
            A.at<float>(row, 1) = 2*y;
            A.at<float>(row, 2) = 2*z;
            A.at<float>(row, idx+3) = 1.0;

            bb.at<float>(row, 0) = f;
        }
    }

    // solve for point
    cv::Mat abe;
    cv::solve(A, bb, abe, cv::DECOMP_SVD);

    float a = abe.at<float>(0, 0);
    float b = abe.at<float>(1, 0);
    float c = abe.at<float>(2, 0);

    point[0] = a;
    point[1] = b;
    point[2] = c;
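
    // The linear system above: with the axis direction n and the offsets (dh, dv) fixed
    // from the eigenvector, each row imposes
    //     2*x*a + 2*y*b + 2*z*c + e_idx = x^2 + y^2 + z^2 + 2*(x*nx + y*ny + z*nz)*(i*dh + j*dv),
    // where (a, b, c) is a common point on the axis and e_idx is one auxiliary constant per
    // pattern corner (hence the mn+3 columns). Solving with DECOMP_SVD gives the least-squares
    // estimate of the axis point, used as the initial value for the refinement below.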

    // Non-linear optimization using Ceres
    double pointArray[] = {point[0], point[1], point[2]};
    double axisArray[] = {axis[0], axis[1], axis[2]};

    ceres::Problem problem;
    // loop through saddle points
    for(unsigned int idx=0; idx<mn; idx++){
        std::vector<cv::Point3f> pointsOnArch(l);
        for(int k=0; k<l; k++){
            pointsOnArch[k] = Qcam[k][idx];
        }
        ceres::CostFunction* cost_function =
            new ceres::AutoDiffCostFunction<CircleResidual, ceres::DYNAMIC, 3, 3>(
                new CircleResidual(pointsOnArch), l);
        problem.AddResidualBlock(cost_function, NULL, pointArray, axisArray);
    }
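
    // One residual block per corner: ceres::DYNAMIC with the runtime size l means each block
    // contributes l residuals (one per frame), and the two parameter blocks of size 3 are the
    // shared axis point and axis direction. The NULL loss function makes this a plain
    // (non-robust) least-squares problem.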

    // Run the solver!
    ceres::Solver::Options options;
    options.linear_solver_type = ceres::DENSE_QR;
    options.minimizer_progress_to_stdout = true;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);

    std::cout << summary.BriefReport() << "\n";

    point = cv::Vec3f(pointArray[0], pointArray[1], pointArray[2]);
    axis = cv::Vec3f(axisArray[0], axisArray[1], axisArray[2]);
    axis /= cv::norm(axis);


    // Error estimate: mean absolute deviation of the point-to-axis distances, averaged over corners
    error = 0;
    // loop through saddle points
    for(unsigned int idx=0; idx<mn; idx++){

        // vector of distances from rotation axis
        std::vector<float> dI(l);
        // loop through angular positions
        for(int k=0; k<l; k++){
            cv::Vec3f p = cv::Vec3f(Qcam[k][idx]);
            // point to line distance
            dI[k] = cv::norm((point-p)-(point-p).dot(axis)*axis);
        }
        float sum = std::accumulate(dI.begin(), dI.end(), 0.0);
        float mean = sum / dI.size();
        float meanDev = 0;
        for(int k=0; k<l; k++){
            meanDev += std::abs(dI[k] - mean);
        }
        meanDev /= l;
        error += meanDev;
    }
    error /= mn;
}

bool processCBCorners(const cv::Size & checkerCount,
                      const cv::Mat & SMCalibrationSetI_frameX,
                      cv::Mat & SMCalibrationSetI_frameXResult,
                      std::vector<cv::Point2f> & qciX){
    // Convert to grayscale
    cv::Mat gray;
    if(SMCalibrationSetI_frameX.channels() == 1)
        cv::cvtColor(SMCalibrationSetI_frameX, gray, CV_BayerBG2GRAY);
    else
        cv::cvtColor(SMCalibrationSetI_frameX, gray, CV_RGB2GRAY);
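    // Note: single-channel input is treated as raw Bayer data (BG pattern) and demosaiced to
    // gray; three-channel input is assumed to be RGB.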

    // Extract checker corners
    bool success = cv::findChessboardCorners(gray, checkerCount, qciX, cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_FAST_CHECK);
    if(success){
        cv::cornerSubPix(gray, qciX, cv::Size(6, 6), cv::Size(1, 1), cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 20, 0.0001));
        // Draw colored chessboard
        if(SMCalibrationSetI_frameX.channels() == 1)
            cv::cvtColor(SMCalibrationSetI_frameX, SMCalibrationSetI_frameXResult, CV_BayerBG2RGB);
        else
            SMCalibrationSetI_frameXResult = SMCalibrationSetI_frameX.clone();

        cvtools::drawChessboardCorners(SMCalibrationSetI_frameXResult, checkerCount, qciX, success, 10);
    }
    return success;
}

std::vector<cv::Point3f> SMCalibrationWorker::generateObjCoordsInWorldCS(const cv::Size checkerCount, const float checkerSize)
{
    std::vector<cv::Point3f> Qi;
    for (int h=0; h<checkerCount.height; h++)
        for (int w=0; w<checkerCount.width; w++)
            Qi.push_back(cv::Point3f(checkerSize * w, checkerSize * h, 0.0));

    return Qi;
}
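
// The object points are generated row by row with the width index varying fastest, all in the
// z = 0 plane of the pattern coordinate system (units follow checkerSize, i.e. mm here). This
// ordering is assumed to correspond to the corner ordering returned by
// cv::findChessboardCorners for the same pattern size.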

void SMCalibrationWorker::perViewReprojError(const std::vector<bool> &success0,
                                             const std::vector< std::vector<cv::Point3f> > &Q0,
                                             const std::vector<cv::Mat> &cam_tvecs0,
                                             const std::vector<cv::Mat> &cam_rvecs0,
                                             const std::vector< std::vector<cv::Point2f> > &qc0,
                                             const cv::Matx33f &K0,
                                             const cv::Vec<float, 5> &k0,
                                             std::vector<float> &camX_errors_per_view)
{
    unsigned int nSets = success0.size();
    camX_errors_per_view.resize(nSets);
    int idx = 0;
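    // idx indexes the compacted vectors (Q0, rvecs, tvecs, qc0), which only contain the views
    // where corner detection succeeded, while i runs over all sets.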
    //#pragma omp parallel for
    for(unsigned int i = 0; i < nSets; ++i){
        if(success0[i]){
            int n = (int)Q0[idx].size();
            std::vector<cv::Point2f> qc_proj;
            cv::projectPoints(cv::Mat(Q0[idx]), cam_rvecs0[idx], cam_tvecs0[idx], K0, k0, qc_proj);
            float err = 0;
            for(unsigned int j=0; j<qc_proj.size(); j++){
                cv::Point2f d = qc0[idx][j] - qc_proj[j];
                err += cv::sqrt(d.x*d.x + d.y*d.y);
            }
            camX_errors_per_view[i] = (float)err/n;
            idx++;
        } else
            camX_errors_per_view[i] = NAN;
    }
}

void SMCalibrationWorker::performCameraCalibration(std::vector<SMCalibrationSet> calibrationData){

    QSettings settings;
    cv::Size checkerCount(settings.value("calibration/patternSizeX", 22).toInt(),
                          settings.value("calibration/patternSizeY", 13).toInt());
    unsigned int nSets = calibrationData.size();

    // 2D points collected for OpenCV's calibration procedures
#define OLD_WAYZ 0
#if OLD_WAYZ
    std::vector< std::vector<cv::Point2f> > qc0, qc1, qc0Stereo, qc1Stereo;
    std::vector<bool> fwdToStageEstimation;
    std::vector<float> angles;
    std::vector<unsigned int> TESTER;
#else
    std::vector< std::vector<cv::Point2f> > qc0(nSets), qc1(nSets), qc0Stereo(nSets), qc1Stereo(nSets);
    std::vector<bool> fwdToStageEstimation(nSets);
    std::vector<float> angles(nSets);
    std::vector<unsigned int> TESTER(nSets);
#endif
    std::vector<bool> success0(nSets, false), success1(nSets, false);
    // Loop through calibration sets
#if !OLD_WAYZ
    #pragma omp parallel for
#endif
    for(unsigned int i=0; i<nSets; i++){

        SMCalibrationSet SMCalibrationSetI = calibrationData[i];

        // TODO this changes semantics of the checkboxes in the GUI
        /*if(!SMCalibrationSetI.checked)
            continue;*/

        // Camera 0
        std::vector<cv::Point2f> qci0;
        success0[i] = processCBCorners(checkerCount, SMCalibrationSetI.frame0, SMCalibrationSetI.frame0Result, qci0);
        #pragma omp critical (CBCALCupdateUI1)
        {
            emit newFrameResult(i, 0, success0[i], SMCalibrationSetI.frame0Result);
        }
        // Camera 1
        std::vector<cv::Point2f> qci1;
        success1[i] = processCBCorners(checkerCount, SMCalibrationSetI.frame1, SMCalibrationSetI.frame1Result, qci1);
        #pragma omp critical (CBCALCupdateUI2)
        {
            emit newFrameResult(i, 1, success1[i], SMCalibrationSetI.frame1Result);
        }
        // store results
#if OLD_WAYZ
        if(success0[i])
            qc0.push_back(qci0);

        if(success1[i])
            qc1.push_back(qci1);

        if(success0[i] && success1[i]){
            qc0Stereo.push_back(qci0);
            qc1Stereo.push_back(qci1);
            angles.push_back(SMCalibrationSetI.rotationAngle);
            fwdToStageEstimation.push_back(SMCalibrationSetI.checked);
            TESTER.push_back(i);
        }
#else
        qc0[i] = qci0;
        qc1[i] = qci1;

        qc0Stereo[i] = qci0;
        qc1Stereo[i] = qci1;
        angles[i] = SMCalibrationSetI.rotationAngle;
        fwdToStageEstimation[i] = SMCalibrationSetI.checked;
        TESTER[i] = i;
#endif
        // Show progress
        #pragma omp critical (CBCALCupdateUI3)
        {
            emit newSetProcessed(i);
        }
    }
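
    // Compact the per-set vectors: in the parallel path each set wrote into its own slot, so
    // entries from sets where detection failed are stripped here. The stateful lambdas step
    // through success0/success1 as remove_if visits the elements in order, keeping only
    // elements whose detection succeeded (in both cameras for the stereo vectors).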
#if !OLD_WAYZ
    auto s0it = success0.cbegin();
    qc0.erase(std::remove_if( qc0.begin(), qc0.end(), [&s0it](std::vector<cv::Point2f>){return !*s0it++;}),
              qc0.end());

    auto s1it = success1.cbegin();
    qc1.erase(std::remove_if( qc1.begin(), qc1.end(), [&s1it](std::vector<cv::Point2f>){return !*s1it++;}),
              qc1.end());

    s0it = success0.cbegin(); s1it = success1.cbegin();
    qc0Stereo.erase(std::remove_if( qc0Stereo.begin(), qc0Stereo.end(), [&s0it,&s1it](std::vector<cv::Point2f>){return !((*s0it++)&(*s1it++));}),
                    qc0Stereo.end());

    s0it = success0.cbegin(); s1it = success1.cbegin();
    qc1Stereo.erase(std::remove_if( qc1Stereo.begin(), qc1Stereo.end(), [&s0it,&s1it](std::vector<cv::Point2f>){return !((*s0it++)&(*s1it++));}),
                    qc1Stereo.end());

    s0it = success0.cbegin(); s1it = success1.cbegin();
    angles.erase(std::remove_if( angles.begin(), angles.end(), [&s0it,&s1it](float){return !((*s0it++)&(*s1it++));}),
                 angles.end());

    s0it = success0.cbegin(); s1it = success1.cbegin();
    fwdToStageEstimation.erase(std::remove_if( fwdToStageEstimation.begin(), fwdToStageEstimation.end(), [&s0it,&s1it](bool){return !((*s0it++)&(*s1it++));}),
                               fwdToStageEstimation.end());

    s0it = success0.cbegin(); s1it = success1.cbegin();
    TESTER.erase(std::remove_if( TESTER.begin(), TESTER.end(), [&s0it,&s1it](bool){return !((*s0it++)&(*s1it++));}),
                 TESTER.end());
    /*for(unsigned int i=0; i<nSets; i++){
        if(~success0[i])
            std::remove(qc0.begin(),qc0.end(),i);
        if(~success1[i])
            std::remove(qc1.begin(),qc1.end(),i);
        if(~success0[i] || ~success1[i]){
            std::remove(qc0Stereo.begin(),qc0Stereo.end(),i);
            std::remove(qc1Stereo.begin(),qc1Stereo.end(),i);
            std::remove(angles.begin(),angles.end(),i);
            std::remove(fwdToStageEstimation.begin(),fwdToStageEstimation.end(),i);
        }
    }*/
#endif
    std::copy(TESTER.begin(), TESTER.end(), std::ostream_iterator<unsigned int>(std::cout, " "));
    std::cout << std::endl;
    const size_t nValidStereoSets = angles.size();
    if(nValidStereoSets < 2){
        std::cerr << "Not enough valid calibration sequences!" << std::endl;
        emit done();
        return;
    }

    // Generate world object coordinates [mm]
    std::vector<cv::Point3f> Qi = generateObjCoordsInWorldCS(checkerCount,
                                                             settings.value("calibration/squareSize", 10.0).toFloat());

    std::vector< std::vector<cv::Point3f> >
            Q0(qc0.size(), Qi),
            Q1(qc1.size(), Qi),
            QStereo(nValidStereoSets, Qi);

    // calibrate the cameras
    SMCalibrationParameters cal;
    cal.frameWidth = calibrationData[0].frame0.cols;
    cal.frameHeight = calibrationData[0].frame0.rows;
    cv::Size frameSize(cal.frameWidth, cal.frameHeight);

    // determine only k1, k2 for lens distortion
    int flags = cv::CALIB_FIX_ASPECT_RATIO + cv::CALIB_FIX_K3 + cv::CALIB_ZERO_TANGENT_DIST + cv::CALIB_FIX_PRINCIPAL_POINT;
    // Note: several of the output arguments below must be cv::Mat, otherwise segfault
    std::vector<cv::Mat> cam_rvecs0, cam_tvecs0;
    cal.cam0_error = cv::calibrateCamera(Q0, qc0, frameSize, cal.K0, cal.k0, cam_rvecs0, cam_tvecs0, flags,
                                         cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 100, DBL_EPSILON));
    //std::cout << cal.k0 << std::endl;
    // // refine extrinsics for camera 0
    // for(int i=0; i<Q.size(); i++)
    //     cv::solvePnPRansac(Q[i], qc0[i], cal.K0, cal.k0, cam_rvecs0[i], cam_tvecs0[i], true, 100, 0.05, 100, cv::noArray(), CV_ITERATIVE);

    std::vector<cv::Mat> cam_rvecs1, cam_tvecs1;
    cal.cam1_error = cv::calibrateCamera(Q1, qc1, frameSize, cal.K1, cal.k1, cam_rvecs1, cam_tvecs1, flags,
                                         cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 100, DBL_EPSILON));
    //std::cout << cal.k1 << std::endl;
    // stereo calibration
    int flags_stereo = cv::CALIB_FIX_INTRINSIC;// + cv::CALIB_FIX_K2 + cv::CALIB_FIX_K3 + cv::CALIB_ZERO_TANGENT_DIST + cv::CALIB_FIX_PRINCIPAL_POINT + cv::CALIB_FIX_ASPECT_RATIO;
    cv::Mat E, F, R1, T1;

#if CV_MAJOR_VERSION < 3
    cal.stereo_error = cv::stereoCalibrate(QStereo, qc0Stereo, qc1Stereo, cal.K0, cal.k0, cal.K1, cal.k1,
                                           frameSize, R1, T1, E, F,
                                           cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 200, DBL_EPSILON),
                                           flags_stereo);
#else
    cal.stereo_error = cv::stereoCalibrate(QStereo, qc0Stereo, qc1Stereo, cal.K0, cal.k0, cal.K1, cal.k1,
                                           frameSize, R1, T1, E, F, flags_stereo,
                                           cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 200, DBL_EPSILON));
#endif
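
    // With CALIB_FIX_INTRINSIC the stereo calibration keeps the per-camera intrinsics
    // (K0, k0, K1, k1) from the single-camera runs above and only estimates the relative pose
    // R1, T1 (plus E and F). The #if handles the changed argument order between OpenCV 2.x
    // (criteria before flags) and 3.x (flags before criteria).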

    cal.R1 = R1;
    cal.T1 = T1;
    cal.E = E;
    cal.F = F;

    // Determine per-view reprojection errors:
    perViewReprojError(success0, Q0, cam_tvecs0, cam_rvecs0, qc0, cal.K0, cal.k0, cal.cam0_errors_per_view);
    perViewReprojError(success1, Q1, cam_tvecs1, cam_rvecs1, qc1, cal.K1, cal.k1, cal.cam1_errors_per_view);

    // This would be so much nicer with range adaptors
    std::vector< std::vector<cv::Point2f> > qc0StereoFwd2Stage;
    std::vector< std::vector<cv::Point2f> > qc1StereoFwd2Stage;

    for(size_t i=0; i<qc0Stereo.size(); i++)
        if(fwdToStageEstimation[i])
            qc0StereoFwd2Stage.push_back(qc0Stereo[i]);

    for(size_t i=0; i<qc1Stereo.size(); i++)
        if(fwdToStageEstimation[i])
            qc1StereoFwd2Stage.push_back(qc1Stereo[i]);

    performStageCalibration(qc0StereoFwd2Stage, qc1StereoFwd2Stage, cal);

    // Print to std::cout
    std::cout << std::endl << "========== BEGIN Calibration info ================================================" << std::endl;
    std::cout << "Num. images used for intrinsics of cam0: " << qc0.size() << std::endl;
    std::cout << "Num. images used for intrinsics of cam1: " << qc1.size() << std::endl;
    std::cout << "Num. images used for extrinsics of cam1: " << nValidStereoSets << std::endl;
    std::cout << "Num. images used for rotation stage axis estim.: " << qc0StereoFwd2Stage.size() << std::endl << std::endl;
    cal.print();
    std::cout << "========== END Calibration info ================================================" << std::endl << std::endl;

    // save to the (reentrant) QSettings object
    settings.setValue("calibration/parameters", QVariant::fromValue(cal));

    emit done();
}


void SMCalibrationWorker::performStageCalibration(
        const std::vector< std::vector<cv::Point2f> > &qc0Stereo,
        const std::vector< std::vector<cv::Point2f> > &qc1Stereo,
        SMCalibrationParameters& cal){
    assert(qc0Stereo.size() == qc1Stereo.size());
    if(qc0Stereo.size() > 1 && qc1Stereo.size() > 1){
        // settings (QSettings is reentrant)
        QSettings settings;

        // Generate world object coordinates [mm]
        std::vector<cv::Point3f> Qi = generateObjCoordsInWorldCS(
                cv::Size(settings.value("calibration/patternSizeX", 22).toInt(), settings.value("calibration/patternSizeY", 13).toInt()),
                settings.value("calibration/squareSize", 10.0).toFloat());

        // Direct rotation axis calibration //
        // full camera matrices
        cv::Matx34f P0 = cv::Matx34f::eye();
        cv::Mat RT1(3, 4, CV_32F);
        cv::Mat(cal.R1).copyTo(RT1(cv::Range(0, 3), cv::Range(0, 3)));
        cv::Mat(cal.T1).copyTo(RT1(cv::Range(0, 3), cv::Range(3, 4)));
        cv::Matx34f P1 = cv::Matx34f(RT1);

        // calibration points in camera 0 frame
        std::vector< std::vector<cv::Point3f> > Qcam(qc0Stereo.size());
        #pragma omp parallel for
        for(unsigned int i=0; i<qc0Stereo.size(); i++){
            std::vector<cv::Point2f> qc0i, qc1i;

            cv::undistortPoints(qc0Stereo[i], qc0i, cal.K0, cal.k0);
            cv::undistortPoints(qc1Stereo[i], qc1i, cal.K1, cal.k1);

            cv::Mat Qhom, Qcami;
            cv::triangulatePoints(P0, P1, qc0i, qc1i, Qhom);
            cvtools::convertMatFromHomogeneous(Qhom, Qcami);
            std::vector<cv::Point3f> QcamiPoints;
            cvtools::matToPoints3f(Qcami, QcamiPoints);

            Qcam[i] = QcamiPoints;
        }
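
        // Note: cv::undistortPoints without a new projection matrix returns normalized image
        // coordinates, so P0 = [I|0] and P1 = [R1|T1] are the appropriate projection matrices
        // for cv::triangulatePoints here; the triangulated points therefore live in the
        // camera 0 coordinate frame (same metric units as T1).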

        cv::Vec3f axis, point;
        float rot_axis_error;
        rotationAxisCalibration(Qcam, Qi, axis, point, rot_axis_error);

        // construct transformation matrix
        cv::Vec3f ex = axis.cross(cv::Vec3f(0, 0, 1.0));
        ex = cv::normalize(ex);
        cv::Vec3f ez = ex.cross(axis);
        ez = cv::normalize(ez);

        cv::Mat RrMat(3, 3, CV_32F);
        cv::Mat(ex).copyTo(RrMat.col(0));
        cv::Mat(axis).copyTo(RrMat.col(1));
        cv::Mat(ez).copyTo(RrMat.col(2));

        cal.Rr = cv::Matx33f(RrMat).t();
        cal.Tr = -cv::Matx33f(RrMat).t()*point;
        cal.rot_axis_error = rot_axis_error;
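
        // RrMat has the orthonormal basis (ex, axis, ez) as its columns, so Rr = RrMat^T and
        // Tr = -RrMat^T * point map camera 0 coordinates into a rotation stage frame whose
        // origin lies on the axis and whose y-axis is the rotation axis itself.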
    }else{
        cal.Rr = cv::Matx33f::eye();
        cal.Tr = cv::Vec3f(0, 0, 0);
        cal.rot_axis_error = -1;
    }
}