//
// Three Frequency Phase Shifting using the Heterodyne Principle
//
// This implementation follows "Reich, Ritter, Thesing, White light heterodyne principle for 3D-measurement", SPIE (1997)
//
// The number of periods in the First and Second frequencies can be chosen freely, but small changes can have a considerable impact on quality.
// The number of phase shifts per frequency can be chosen freely (min. 3), and higher values reduce the effects of image noise. They also allow bad points to be filtered based on the energy found at frequencies other than the encoding frequency.
//
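//
// Heterodyne principle (sketch): if two fringe patterns have nPeriodsA and nPeriodsB periods
// across the screen, the difference of their wrapped phases (taken modulo 2*pi) behaves like a
// fringe signal with nPeriodsB - nPeriodsA periods. Cascading the differences First->Second and
// Second->Third down to a signal with a single period yields an absolute, unambiguous phase cue,
// which is then used to unwrap the high-frequency phases.
//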
#include "AlgorithmPhaseShiftThreeFreq.h"
#include <math.h>
#include "cvtools.h"
#include "algorithmtools.h"
static unsigned int nStepsFirst = 8; // number of shifts/steps in First
static unsigned int nStepsSecond = 8; // number of shifts/steps in Second
static unsigned int nStepsThird = 8; // number of shifts/steps in Third
static float nPeriodsFirst = 24; // number of periods in First
static float nPeriodsSecond = 30; // number of periods in Second
AlgorithmPhaseShiftThreeFreq::AlgorithmPhaseShiftThreeFreq(unsigned int _screenCols, unsigned int _screenRows) : Algorithm(_screenCols, _screenRows){
// Set N
N = 2+nStepsFirst+nStepsSecond+nStepsThird;
// Determine the number of periods in Third such that the heterodyne condition is fulfilled
float nPeriodsThird = 1 + 2*nPeriodsSecond - nPeriodsFirst;
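// With the default values this gives nPeriodsThird = 1 + 2*30 - 24 = 37: the Second-First beat
// has 30-24 = 6 periods, the Third-Second beat has 37-30 = 7 periods, and their difference has
// exactly 1 period across the screen, i.e. an unambiguous unwrapping cue.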
// all on pattern
cv::Mat allOn(1, screenCols, CV_8UC3, cv::Scalar::all(255));
patterns.push_back(allOn);
// all off pattern
cv::Mat allOff(1, screenCols, CV_8UC3, cv::Scalar::all(0));
patterns.push_back(allOff);
// Precompute encoded patterns
// First encoding patterns
for(unsigned int i=0; i<nStepsFirst; i++){
float phase = 2.0*CV_PI/nStepsFirst * i;
float pitch = screenCols/nPeriodsFirst;
cv::Mat patternI = computePhaseVector(screenCols, phase, pitch);
patterns.push_back(patternI.t());
}
// Second encoding patterns
for(unsigned int i=0; i<nStepsSecond; i++){
float phase = 2.0*CV_PI/nStepsSecond * i;
float pitch = screenCols/nPeriodsSecond;
cv::Mat patternI = computePhaseVector(screenCols, phase, pitch);
patterns.push_back(patternI.t());
}
// Third encoding patterns
for(unsigned int i=0; i<nStepsThird; i++){
float phase = 2.0*CV_PI/nStepsThird * i;
float pitch = screenCols/nPeriodsThird;
cv::Mat patternI = computePhaseVector(screenCols, phase, pitch);
patterns.push_back(patternI.t());
}
}
cv::Mat AlgorithmPhaseShiftThreeFreq::getEncodingPattern(unsigned int depth){
return patterns[depth];
}
void AlgorithmPhaseShiftThreeFreq::get3DPoints(SMCalibrationParameters calibration, const std::vector<cv::Mat>& frames0, const std::vector<cv::Mat>& frames1, std::vector<cv::Point3f>& Q, std::vector<cv::Vec3b>& color){
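// Pipeline: rectify both camera streams, decode an absolute phase (projector column) map per
// camera, build validity masks, match pixels row-wise between the rectified views by their
// phase values, and triangulate the matches into 3D points with per-point color.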
assert(frames0.size() == N);
assert(frames1.size() == N);
int frameRows = frames0[0].rows;
int frameCols = frames0[0].cols;
float nPeriodsThird = 1 + 2*nPeriodsSecond - nPeriodsFirst;
// Rectifying transforms (rotations R0, R1 and projection matrices P0, P1)
cv::Size frameSize(frameCols, frameRows);
cv::Mat R, T;
// stereoRectify segfaults unless R is double precision
cv::Mat(calibration.R1).convertTo(R, CV_64F);
cv::Mat(calibration.T1).convertTo(T, CV_64F);
cv::Mat R0, R1, P0, P1, QRect;
cv::stereoRectify(calibration.K0, calibration.k0, calibration.K1, calibration.k1, frameSize, R, T, R0, R1, P0, P1, QRect, 0);
// Interpolation maps (lens distortion and rectification)
cv::Mat map0X, map0Y, map1X, map1Y;
cv::initUndistortRectifyMap(calibration.K0, calibration.k0, R0, P0, frameSize, CV_32F, map0X, map0Y);
cv::initUndistortRectifyMap(calibration.K1, calibration.k1, R1, P1, frameSize, CV_32F, map1X, map1Y);
// Gray-scale and remap
std::vector<cv::Mat> frames0Rect(N);
std::vector<cv::Mat> frames1Rect(N);
for(unsigned int i=0; i<N; i++){
cv::Mat temp;
cv::cvtColor(frames0[i], temp, CV_BayerBG2GRAY);
cv::remap(temp, frames0Rect[i], map0X, map0Y, CV_INTER_LINEAR);
cv::cvtColor(frames1[i], temp, CV_BayerBG2GRAY);
cv::remap(temp, frames1Rect[i], map1X, map1Y, CV_INTER_LINEAR);
}
// Decode camera0
std::vector<cv::Mat> frames0First(frames0Rect.begin()+2, frames0Rect.begin()+2+nStepsFirst);
std::vector<cv::Mat> frames0Second(frames0Rect.begin()+2+nStepsFirst, frames0Rect.end()-nStepsThird);
std::vector<cv::Mat> frames0Third(frames0Rect.end()-nStepsThird, frames0Rect.end());
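// The wrapped phase is estimated from the first DFT harmonic of each phase-shift sequence; with
// the component layout used here, F[2] and F[3] hold its real and imaginary parts, so
// cv::phase(Re, -Im) yields the wrapped phase and cv::magnitude the modulation amplitude.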
std::vector<cv::Mat> F0First = getDFTComponents(frames0First);
cv::Mat up0First;
cv::phase(F0First[2], -F0First[3], up0First);
std::vector<cv::Mat> F0Second = getDFTComponents(frames0Second);
cv::Mat up0Second;
cv::phase(F0Second[2], -F0Second[3], up0Second);
std::vector<cv::Mat> F0Third = getDFTComponents(frames0Third);
cv::Mat up0Third;
cv::phase(F0Third[2], -F0Third[3], up0Third);
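// Cascade the phase differences (heterodyne): Second-First and Third-Second are beat phases
// with 6 and 7 periods respectively (at the default settings), and their difference up0FST
// spans a single period, so it serves as the absolute cue for unwrapping all three phases.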
cv::Mat up0FS = up0Second - up0First;
up0FS = cvtools::modulo(up0FS, 2.0*CV_PI);
cv::Mat up0ST = up0Third - up0Second;
up0ST = cvtools::modulo(up0ST, 2.0*CV_PI);
cv::Mat up0FST = up0ST - up0FS;
up0FST = cvtools::modulo(up0FST, 2.0*CV_PI);
cv::Mat up0F = unwrapWithCue(up0First, up0FST, nPeriodsFirst);
cv::Mat up0S = unwrapWithCue(up0Second, up0FST, nPeriodsSecond);
cv::Mat up0T = unwrapWithCue(up0Third, up0FST, nPeriodsThird);
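// Average the three unwrapped phases and rescale from [0, 2*pi) to projector column coordinates
// [0, screenCols); the spread (range) between them is kept as a per-pixel quality measure.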
cv::Mat up0Mean = 1.0/3.0 * (up0F + up0S + up0T);
cv::Mat up0Range = cv::max(up0F, cv::max(up0S, up0T)) - cv::min(up0F, cv::min(up0S, up0T));
cv::Mat up0 = up0Mean * screenCols/(2.0*CV_PI);
cv::Mat amplitude0;
cv::magnitude(F0First[2], -F0First[3], amplitude0);
// Decode camera1
std::vector<cv::Mat> frames1First(frames1Rect.begin()+2, frames1Rect.begin()+2+nStepsFirst);
std::vector<cv::Mat> frames1Second(frames1Rect.begin()+2+nStepsFirst, frames1Rect.end()-nStepsThird);
std::vector<cv::Mat> frames1Third(frames1Rect.end()-nStepsThird, frames1Rect.end());
std::vector<cv::Mat> F1First = getDFTComponents(frames1First);
cv::Mat up1First;
cv::phase(F1First[2], -F1First[3], up1First);
std::vector<cv::Mat> F1Second = getDFTComponents(frames1Second);
cv::Mat up1Second;
cv::phase(F1Second[2], -F1Second[3], up1Second);
std::vector<cv::Mat> F1Third = getDFTComponents(frames1Third);
cv::Mat up1Third;
cv::phase(F1Third[2], -F1Third[3], up1Third);
cv::Mat up1FS = up1Second - up1First;
up1FS = cvtools::modulo(up1FS, 2.0*CV_PI);
cv::Mat up1ST = up1Third - up1Second;
up1ST = cvtools::modulo(up1ST, 2.0*CV_PI);
cv::Mat up1FST = up1ST - up1FS;
up1FST = cvtools::modulo(up1FST, 2.0*CV_PI);
cv::Mat up1F = unwrapWithCue(up1First, up1FST, nPeriodsFirst);
cv::Mat up1S = unwrapWithCue(up1Second, up1FST, nPeriodsSecond);
cv::Mat up1T = unwrapWithCue(up1Third, up1FST, nPeriodsThird);
cv::Mat up1Mean = 1.0/3.0 * (up1F + up1S + up1T);
cv::Mat up1Range = cv::max(up1F, cv::max(up1S, up1T)) - cv::min(up1F, cv::min(up1S, up1T));
cv::Mat up1 = up1Mean * screenCols/(2.0*CV_PI);
cv::Mat amplitude1;
cv::magnitude(F1First[2], -F1First[3], amplitude1);
#ifdef QT_DEBUG
cvtools::writeMat(up0First, "up0First.mat", "up0First");
cvtools::writeMat(up0Second, "up0Second.mat", "up0Second");
cvtools::writeMat(up0Third, "up0Third.mat", "up0Third");
cvtools::writeMat(up0FS, "up0FS.mat", "up0FS");
cvtools::writeMat(up0ST, "up0ST.mat", "up0ST");
cvtools::writeMat(up0FST, "up0FST.mat", "up0FST");
cvtools::writeMat(up0Mean, "up0Mean.mat", "up0Mean");
cvtools::writeMat(up0Range, "up0Range.mat", "up0Range");
cvtools::writeMat(up0, "up0.mat", "up0");
cvtools::writeMat(up1, "up1.mat", "up1");
cvtools::writeMat(amplitude0, "amplitude0.mat", "amplitude0");
cvtools::writeMat(amplitude1, "amplitude1.mat", "amplitude1");
#endif
// color debayer and remap
cv::Mat color0, color1;
cv::cvtColor(frames0[0], color0, CV_BayerBG2RGB);
cv::remap(color0, color0, map0X, map0Y, CV_INTER_LINEAR);
cv::cvtColor(frames1[0], color1, CV_BayerBG2RGB);
cv::remap(color1, color1, map1X, map1Y, CV_INTER_LINEAR);
#ifdef QT_DEBUG
cvtools::writeMat(color0, "color0.mat", "color0");
cvtools::writeMat(color1, "color1.mat", "color1");
#endif
// Occlusion masks
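// The all-on and all-off frames (indices 0 and 1) give a per-pixel measure of how much projected
// light reaches the camera; pixels with too little contrast (shadowed/occluded) or close to
// saturation are masked out.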
cv::Mat occlusion0, occlusion1;
cv::subtract(frames0Rect[0], frames0Rect[1], occlusion0);
occlusion0 = (occlusion0 > 25) & (occlusion0 < 250);
cv::subtract(frames1Rect[0], frames1Rect[1], occlusion1);
occlusion1 = (occlusion1 > 25) & (occlusion1 < 250);
// Threshold on range
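// The three unwrapped phases should agree at valid pixels; a large spread (here 0.05 rad)
// indicates an unwrapping error or a corrupted measurement, so such pixels are removed.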
occlusion0 = occlusion0 & (up0Range < 0.05);
occlusion1 = occlusion1 & (up1Range < 0.05);
// // Erode occlusion masks
// cv::Mat strel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5,5));
// cv::erode(occlusion0, occlusion0, strel);
// cv::erode(occlusion1, occlusion1, strel);
// Threshold on gradient of phase
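// The Sobel filter takes the mixed x/y derivative of the unwrapped phase map; large responses
// indicate discontinuities (jump edges) where sub-pixel matching would be unreliable.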
cv::Mat edges0;
cv::Sobel(up0, edges0, -1, 1, 1, 5);
occlusion0 = occlusion0 & (abs(edges0) < 150);
cv::Mat edges1;
cv::Sobel(up1, edges1, -1, 1, 1, 5);
occlusion1 = occlusion1 & (abs(edges1) < 150);
#ifdef QT_DEBUG
cvtools::writeMat(edges0, "edges0.mat", "edges0");
cvtools::writeMat(edges1, "edges1.mat", "edges1");
#endif
#ifdef QT_DEBUG
cvtools::writeMat(occlusion0, "occlusion0.mat", "occlusion0");
cvtools::writeMat(occlusion1, "occlusion1.mat", "occlusion1");
#endif
// Match phase maps
int frameRectRows = map0X.rows;
int frameRectCols = map0X.cols;
// camera0 against camera1
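// For every unmasked pixel in a rectified camera0 row, scan the same row in camera1 for the pair
// of neighbouring columns whose phase values bracket the camera0 phase, and interpolate linearly
// between them to obtain a sub-pixel column match. The guards reject matches where the phase gap
// exceeds one projector column or where the local phase slope is too flat (< 0.1).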
std::vector<cv::Vec2f> q0, q1;
for(int row=0; row<frameRectRows; row++){
for(int col=0; col<frameRectCols; col++){
if(!occlusion0.at<char>(row,col))
continue;
float up0i = up0.at<float>(row,col);
for(int col1=0; col1<up1.cols-1; col1++){
if(!occlusion1.at<char>(row,col1) || !occlusion1.at<char>(row,col1+1))
continue;
float up1Left = up1.at<float>(row,col1);
float up1Right = up1.at<float>(row,col1+1);
if((up1Left <= up0i) && (up0i <= up1Right) && (up0i-up1Left < 1.0) && (up1Right-up0i < 1.0) && (up1Right-up1Left > 0.1)){
float col1i = col1 + (up0i-up1Left)/(up1Right-up1Left);
q0.push_back(cv::Point2f(col, row));
q1.push_back(cv::Point2f(col1i, row));
break;
}
}
}
}
int nMatches = q0.size();
if(nMatches < 1){
Q.resize(0);
color.resize(0);
return;
}
// Retrieve color information
color.resize(nMatches);
for(int i=0; i<nMatches; i++){
cv::Vec3b c0 = color0.at<cv::Vec3b>(q0[i][1], q0[i][0]);
cv::Vec3b c1 = color1.at<cv::Vec3b>(q1[i][1], q1[i][0]);
color[i] = 0.5*c0 + 0.5*c1;
}
// Triangulate points
cv::Mat QMatHomogenous, QMat;
cv::triangulatePoints(P0, P1, q0, q1, QMatHomogenous);
cvtools::convertMatFromHomogeneous(QMatHomogenous, QMat);
// Undo rectification
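// The triangulated points are expressed in the rectified camera0 frame; rotating by R0^T (the
// inverse of the rectifying rotation) brings them back into the original camera0 frame.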
cv::Mat R0Inv;
cv::Mat(R0.t()).convertTo(R0Inv, CV_32F);
QMat = R0Inv*QMat;
cvtools::matToPoints3f(QMat, Q);
}