#include "SMReconstructionWorker.h"
#include "CodecGrayCode.h"
#include "CodecPhaseShift.h"
#include <QCoreApplication>
#include <QSettings>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/io/pcd_io.h>
void SMReconstructionWorker::setup(){

    QSettings settings;

    // Get current calibration
    calibration = settings.value("calibration").value<SMCalibrationParameters>();

    // Create decoder
    dir = (CodecDir)settings.value("pattern/direction", CodecDirHorizontal).toInt();
    if(dir == CodecDirNone)
        std::cerr << "SMReconstructionWorker: invalid coding direction" << std::endl;

    QString codec = settings.value("codec", "GrayCode").toString();
    if(codec == "PhaseShift")
        decoder = new DecoderPhaseShift(dir);
    else if(codec == "GrayCode")
        decoder = new DecoderGrayCode(dir);
    else
        std::cerr << "SMReconstructionWorker: invalid codec " << codec.toStdString() << std::endl;

    // Precompute lens correction maps for both cameras
    cv::Mat eye = cv::Mat::eye(3, 3, CV_32F);
    cv::initUndistortRectifyMap(calibration.K0, calibration.k0, eye, calibration.K0, cv::Size(calibration.frameWidth, calibration.frameHeight), CV_32FC1, lensMap0Horz, lensMap0Vert);
    // Camera 1 maps are built from its own intrinsics (assuming the calibration provides K1/k1)
    cv::initUndistortRectifyMap(calibration.K1, calibration.k1, eye, calibration.K1, cv::Size(calibration.frameWidth, calibration.frameHeight), CV_32FC1, lensMap1Horz, lensMap1Vert);

    //cv::Mat mapHorz, mapVert;
    //cv::normalize(lensMap0Horz, mapHorz, 0, 255, cv::NORM_MINMAX, CV_8U);
    //cv::normalize(lensMap0Vert, mapVert, 0, 255, cv::NORM_MINMAX, CV_8U);
    //cv::imwrite("mapHorz.png", mapHorz);
    //cv::imwrite("mapVert.png", mapVert);
}
void SMReconstructionWorker::reconstructPointCloud(SMFrameSequence frameSequence){

    time.start();

    // Decode frames
    cv::Mat up0, vp0, up1, vp1, shading0, mask0, shading1, mask1;
    decoder->decodeFrames(frameSequence.frames0, up0, vp0, mask0, shading0);
    decoder->decodeFrames(frameSequence.frames1, up1, vp1, mask1, shading1);
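    // A possible next step (sketch only, not part of the current flow): apply the lens
    // correction maps precomputed in setup() to the decoded maps before triangulation.
    // Linear interpolation suits the continuous up/vp maps; nearest-neighbour keeps the
    // binary masks binary.
    //cv::Mat up0u, mask0u, up1u, mask1u;
    //cv::remap(up0, up0u, lensMap0Horz, lensMap0Vert, cv::INTER_LINEAR);
    //cv::remap(mask0, mask0u, lensMap0Horz, lensMap0Vert, cv::INTER_NEAREST);
    //cv::remap(up1, up1u, lensMap1Horz, lensMap1Vert, cv::INTER_LINEAR);
    //cv::remap(mask1, mask1u, lensMap1Horz, lensMap1Vert, cv::INTER_NEAREST);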
    // Triangulate
    cv::Mat pointCloud;
    if(dir == CodecDirBoth)
        triangulateFromUpVp(up0, vp0, mask0, up1, vp1, mask1, pointCloud);
    else if(dir == CodecDirHorizontal)
        triangulateFromUp(up0, mask0, up1, mask1, pointCloud);
    else if(dir == CodecDirVertical)
        triangulateFromVp(vp0, mask0, vp1, mask1, pointCloud);

    // Simply use shading information from camera 0 (for now)
    cv::Mat shading = shading0;

    // Convert point cloud to PCL format
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointCloudPCL(new pcl::PointCloud<pcl::PointXYZRGB>);

    // Interpret as organized point cloud
    pointCloudPCL->width = pointCloud.cols;
    pointCloudPCL->height = pointCloud.rows;
    pointCloudPCL->is_dense = false;

    pointCloudPCL->points.resize(pointCloud.rows*pointCloud.cols);

    for(int row=0; row<pointCloud.rows; row++){
        int offset = row * pointCloudPCL->width;
        for(int col=0; col<pointCloud.cols; col++){
            const cv::Vec3f pnt = pointCloud.at<cv::Vec3f>(row,col);
            // Shading is 16-bit; keep the most significant byte as an 8-bit gray value
            unsigned char shade = shading.at<unsigned short>(row,col) >> 8;
            pcl::PointXYZRGB point;
            point.x = pnt[0]; point.y = pnt[1]; point.z = pnt[2];
            point.r = shade; point.g = shade; point.b = shade;
            pointCloudPCL->points[offset + col] = point;
        }
    }
    /*  // stack xyz data
    std::vector<cv::Mat> xyz;
    cv::split(pointCloud, xyz);

    std::vector<cv::Mat> pointCloudChannels;
    pointCloudChannels.push_back(xyz[0]);
    pointCloudChannels.push_back(xyz[1]);
    pointCloudChannels.push_back(xyz[2]);

    // 4 byte padding
    pointCloudChannels.push_back(cv::Mat::zeros(pointCloud.size(), CV_32F));

    // triple uchar color information
    std::vector<cv::Mat> rgb;
    rgb.push_back(shading);
    rgb.push_back(shading);
    rgb.push_back(shading);
    rgb.push_back(cv::Mat::zeros(shading.size(), CV_8U));

    cv::Mat rgb8UC4;
    cv::merge(rgb, rgb8UC4);

    cv::Mat rgb32F(rgb8UC4.size(), CV_32F, rgb8UC4.data);
    pointCloudChannels.push_back(rgb32F);

    // 12 bytes padding
    pointCloudChannels.push_back(cv::Mat::zeros(pointCloud.size(), CV_32F));
    pointCloudChannels.push_back(cv::Mat::zeros(pointCloud.size(), CV_32F));
    pointCloudChannels.push_back(cv::Mat::zeros(pointCloud.size(), CV_32F));

    // merge channels
    cv::Mat pointCloudPadded;
    cv::merge(pointCloudChannels, pointCloudPadded);

    // memcpy everything
    memcpy(&pointCloudPCL->points[0], pointCloudPadded.data, pointCloudPadded.rows*pointCloudPadded.cols*sizeof(pcl::PointXYZRGB));*/
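    // Optional post-processing (sketch only): the statistical_outlier_removal and pcd_io
    // headers included above are not used yet; they could denoise and save the cloud before
    // it is emitted. Parameter values and the file name below are illustrative assumptions.
    //pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointCloudFiltered(new pcl::PointCloud<pcl::PointXYZRGB>);
    //pcl::StatisticalOutlierRemoval<pcl::PointXYZRGB> outlierRemoval;
    //outlierRemoval.setInputCloud(pointCloudPCL);
    //outlierRemoval.setMeanK(50);
    //outlierRemoval.setStddevMulThresh(1.0);
    //outlierRemoval.filter(*pointCloudFiltered);
    //pcl::io::savePCDFileBinary("pointCloud.pcd", *pointCloudFiltered);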
    // Emit result
    emit newPointCloud(pointCloudPCL);

    std::cout << "SMReconstructionWorker: " << time.elapsed() << "ms" << std::endl;
}
void SMReconstructionWorker::reconstructPointClouds(std::vector<SMFrameSequence> frameSequences){

    // Process sequentially
    for(unsigned int i=0; i<frameSequences.size(); i++){
        reconstructPointCloud(frameSequences[i]);
    }
}
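// Triangulation routines; bodies are not implemented in this revision.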
void SMReconstructionWorker::triangulateFromUp(cv::Mat up0, cv::Mat mask0,cv::Mat up1, cv::Mat mask1,cv::Mat &xyz){}
void SMReconstructionWorker::triangulateFromVp(cv::Mat vp0, cv::Mat mask0, cv::Mat vp1, cv::Mat mask1, cv::Mat &xyz){}
void SMReconstructionWorker::triangulateFromUpVp(cv::Mat up0, cv::Mat vp0, cv::Mat mask0, cv::Mat up1, cv::Mat vp1, cv::Mat mask1, cv::Mat &xyz){}
//void SMReconstructionWorker::triangulateFromUpVp(cv::Mat &up, cv::Mat &vp, cv::Mat &xyz){

//    std::cerr << "WARNING! NOT FULLY IMPLEMENTED!" << std::endl;
//    int N = up.rows * up.cols;

//    cv::Mat projPointsCam(2, N, CV_32F);
//    uc.reshape(0,1).copyTo(projPointsCam.row(0));
//    vc.reshape(0,1).copyTo(projPointsCam.row(1));

//    cv::Mat projPointsProj(2, N, CV_32F);
//    up.reshape(0,1).copyTo(projPointsProj.row(0));
//    vp.reshape(0,1).copyTo(projPointsProj.row(1));

//    cv::Mat Pc(3,4,CV_32F,cv::Scalar(0.0));
//    cv::Mat(calibration.Kc).copyTo(Pc(cv::Range(0,3), cv::Range(0,3)));

//    cv::Mat Pp(3,4,CV_32F), temp(3,4,CV_32F);
//    cv::Mat(calibration.Rp).copyTo(temp(cv::Range(0,3), cv::Range(0,3)));
//    cv::Mat(calibration.Tp).copyTo(temp(cv::Range(0,3), cv::Range(3,4)));
//    Pp = cv::Mat(calibration.Kp) * temp;

//    cv::Mat xyzw;
//    cv::triangulatePoints(Pc, Pp, projPointsCam, projPointsProj, xyzw);

//    xyz.create(3, N, CV_32F);
//    for(int i=0; i<N; i++){
//        xyz.at<float>(0,i) = xyzw.at<float>(0,i)/xyzw.at<float>(3,i);
//        xyz.at<float>(1,i) = xyzw.at<float>(1,i)/xyzw.at<float>(3,i);
//        xyz.at<float>(2,i) = xyzw.at<float>(2,i)/xyzw.at<float>(3,i);
//    }

//    xyz = xyz.t();
//    xyz = xyz.reshape(3, up.rows);
//}
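// Note: uc and vc above are not defined in this file; in the triangulation they would hold
// the camera pixel coordinates paired with the decoded projector coordinates up/vp, and
// Kc/Kp/Rp/Tp appear to come from a different (camera/projector) calibration layout than
// the K0/k0/K1/k1 fields used above. A minimal sketch (assumption only) of building such
// pixel coordinate grids:
//cv::Mat uc(up.size(), CV_32F), vc(up.size(), CV_32F);
//for(int row=0; row<up.rows; row++){
//    for(int col=0; col<up.cols; col++){
//        uc.at<float>(row,col) = col;  // camera column coordinate
//        vc.at<float>(row,col) = row;  // camera row coordinate
//    }
//}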