#include "SMCalibrationWorker.h"
#include "SMCalibrationParameters.h"

#include "cvtools.h"

#include <QSettings>

#include <ceres/ceres.h>

#include <cfloat>   // DBL_EPSILON
#include <cmath>    // NAN
#include <iostream> // std::cout, std::cerr
#include <numeric>  // std::accumulate


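// Residual functor for the Ceres refinement step: for one checkerboard corner observed at l
// turntable positions, it computes each point's distance to the candidate rotation axis
// (parameterized by a point on the axis and a direction vector) and returns the deviations
// from the mean distance. A corner moving on a circular arc about the axis gives zero residuals.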
struct CircleResidual {
  CircleResidual(std::vector<cv::Point3f> _pointsOnArc)
      : pointsOnArc(_pointsOnArc) {}

  template <typename T>
  bool operator()(const T* point, const T* axis, T* residual) const {

    T axisSqNorm = axis[0]*axis[0] + axis[1]*axis[1] + axis[2]*axis[2];

    unsigned int l = pointsOnArc.size();
    std::vector<T> dI(l);

    // running sum of the point-to-axis distances
    T sum(0.0);

    for(unsigned int i=0; i<l; i++){
      cv::Point3d p = pointsOnArc[i];
      //T p[3] = {pointsOnArc[i].x, pointsOnArc[i].y, pointsOnArc[i].z};

      // point to line distance
      T dotProd = (point[0]-p.x)*axis[0] + (point[1]-p.y)*axis[1] + (point[2]-p.z)*axis[2];
      T dIx = point[0] - p.x - (dotProd*axis[0]/axisSqNorm);
      T dIy = point[1] - p.y - (dotProd*axis[1]/axisSqNorm);
      T dIz = point[2] - p.z - (dotProd*axis[2]/axisSqNorm);
      dI[i] = ceres::sqrt(dIx*dIx + dIy*dIy + dIz*dIz);

      sum += dI[i];
    }

    T mean = sum / double(l);

    for(unsigned int i=0; i<l; i++){
        residual[i] = dI[i] - mean;
    }

    return true;
  }

 private:

  // Observations for one checkerboard corner.
  const std::vector<cv::Point3f> pointsOnArc;
};


// Closed-form solution for the rotation axis from sets of 3D coordinates of flat-pattern feature points,
// followed by non-linear refinement with Ceres.
// Algorithm according to Chen et al., "Rotation axis calibration of a turntable using constrained global optimization", Optik 2014.
// DTU, 2014, Jakob Wilm
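// Qcam[k][idx] holds the camera-frame 3D position of pattern corner idx at turntable position k,
// Qobj[idx] its coordinates on the flat pattern (z = 0). Outputs: the axis direction, a point on
// the axis, and an error measure (mean absolute deviation of the per-corner radii).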
static void rotationAxisCalibration(const std::vector< std::vector<cv::Point3f> > &Qcam, const std::vector<cv::Point3f> &Qobj, cv::Vec3f &axis, cv::Vec3f &point, float &error){

    // number of frames (points on each arc)
    int l = Qcam.size();

    // number of points in each frame
    size_t mn = Qobj.size();

    assert(mn == Qcam[0].size());

    // construct matrix for axis determination
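    // Each observation adds the upper triangle of the outer product v*v^T with v = (x, y, z, 1, i, j);
    // cv::completeSymm below fills in the lower triangle.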
    cv::Mat M(6, 6, CV_32F, cv::Scalar(0));

    for(int k=0; k<l; k++){
        for(unsigned int idx=0; idx<mn; idx++){

//            float i = Qobj[idx].x+4;
//            float j = Qobj[idx].y+4;
            float i = Qobj[idx].x;
            float j = Qobj[idx].y;

            float x = Qcam[k][idx].x;
            float y = Qcam[k][idx].y;
            float z = Qcam[k][idx].z;

            M += (cv::Mat_<float>(6,6) << x*x, x*y, x*z, x, i*x, j*x,
                                            0, y*y, y*z, y, i*y, j*y,
                                            0,   0, z*z, z, i*z, j*z,
                                            0,   0,   0, 1,   i,   j,
                                            0,   0,   0, 0, i*i, i*j,
                                            0,   0,   0, 0,   0, j*j);

        }
    }

    cv::completeSymm(M, false);

    // solve for axis
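    // The eigenvector of M with the smallest-magnitude eigenvalue holds (nx, ny, nz, d, dh, dv);
    // its first three components give the axis direction, the remaining components enter the
    // linear solve for a point on the axis further below.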
    std::vector<float> lambda;
    cv::Mat u;
    cv::eigen(M, lambda, u);

    float minLambda = std::abs(lambda[0]);
    int idx = 0;
    for(unsigned int i=1; i<lambda.size(); i++){
        if(std::abs(lambda[i]) < minLambda){
            minLambda = std::abs(lambda[i]);
            idx = i;
        }
    }

    axis = u.row(idx).colRange(0, 3);
    axis = cv::normalize(axis);

    float nx = u.at<float>(idx, 0);
    float ny = u.at<float>(idx, 1);
    float nz = u.at<float>(idx, 2);
    //float d  = u.at<float>(idx, 3);
    float dh = u.at<float>(idx, 4);
    float dv = u.at<float>(idx, 5);

//    // Paper version: c is initially eliminated
//    cv::Mat A(l*mn, mn+2, CV_32F, cv::Scalar(0.0));
//    cv::Mat bb(l*mn, 1, CV_32F);

//    for(int k=0; k<l; k++){
//        for(unsigned int idx=0; idx<mn; idx++){

//            float i = Qobj[idx].x;
//            float j = Qobj[idx].y;

//            float x = Qcam[k][idx].x;
//            float y = Qcam[k][idx].y;
//            float z = Qcam[k][idx].z;

//            float f = x*x + y*y + z*z + (2*x*nx + 2*y*ny + 2*z*nz)*(i*dh + j*dv);

//            int row = k*mn+idx;
//            A.at<float>(row, 0) = 2*x - (2*z*nx)/nz;
//            A.at<float>(row, 1) = 2*y - (2*z*ny)/nz;
//            A.at<float>(row, idx+2) = 1.0;

//            bb.at<float>(row, 0) = f + (2*z*d)/nz;
//        }
//    }

//    // solve for point
//    cv::Mat abe;
//    cv::solve(A, bb, abe, cv::DECOMP_SVD);

//    float a = abe.at<float>(0, 0);
//    float b = abe.at<float>(1, 0);
//    float c = -(nx*a+ny*b+d)/nz;

    // Our version: solve simultaneously for a, b, c
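    // Linear least-squares system for a point (a, b, c) on the axis: each corner observation at each
    // position contributes one row 2x*a + 2y*b + 2z*c + e_idx = f, with one extra unknown e_idx per
    // corner which presumably absorbs that corner's squared radius and plane-offset terms.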
    cv::Mat A(l*mn, mn+3, CV_32F, cv::Scalar(0.0));
    cv::Mat bb(l*mn, 1, CV_32F);

    for(int k=0; k<l; k++){
        for(unsigned int idx=0; idx<mn; idx++){

            float i = Qobj[idx].x;
            float j = Qobj[idx].y;

            float x = Qcam[k][idx].x;
            float y = Qcam[k][idx].y;
            float z = Qcam[k][idx].z;

            float f = x*x + y*y + z*z + (2*x*nx + 2*y*ny + 2*z*nz)*(i*dh + j*dv);

            int row = k*mn+idx;
            A.at<float>(row, 0) = 2*x;
            A.at<float>(row, 1) = 2*y;
            A.at<float>(row, 2) = 2*z;
            A.at<float>(row, idx+3) = 1.0;

            bb.at<float>(row, 0) = f;
        }
    }

    // solve for point
    cv::Mat abe;
    cv::solve(A, bb, abe, cv::DECOMP_SVD);

    float a = abe.at<float>(0, 0);
    float b = abe.at<float>(1, 0);
    float c = abe.at<float>(2, 0);

    point[0] = a;
    point[1] = b;
    point[2] = c;

    // Non-linear optimization using Ceres
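    // Refine the closed-form point/axis estimate by minimizing, per corner, the deviation of its
    // point-to-axis distances from their mean (see CircleResidual above). The residual dimension
    // equals the number of turntable positions l, hence ceres::DYNAMIC.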
    double pointArray[] = {point[0], point[1], point[2]};
    double axisArray[] = {axis[0], axis[1], axis[2]};

    ceres::Problem problem;
    // loop through saddle points
    for(unsigned int idx=0; idx<mn; idx++){
        std::vector<cv::Point3f> pointsOnArc(l);
        for(int k=0; k<l; k++){
            pointsOnArc[k] = Qcam[k][idx];
        }
        ceres::CostFunction* cost_function =
           new ceres::AutoDiffCostFunction<CircleResidual, ceres::DYNAMIC, 3, 3>(
               new CircleResidual(pointsOnArc), l);
        problem.AddResidualBlock(cost_function, NULL, pointArray, axisArray);
    }

    // Run the solver!
    ceres::Solver::Options options;
    options.linear_solver_type = ceres::DENSE_QR;
    options.minimizer_progress_to_stdout = true;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);

    std::cout << summary.BriefReport() << "\n";

    point = cv::Vec3f(pointArray[0], pointArray[1], pointArray[2]);
    axis = cv::Vec3f(axisArray[0], axisArray[1], axisArray[2]);
    axis /= cv::norm(axis);


    // Error estimate: per-corner mean absolute deviation of the point-to-axis distances, averaged over all corners
    error = 0;
    // loop through saddle points
    for(unsigned int idx=0; idx<mn; idx++){

        // vector of distances from rotation axis
        std::vector<float> dI(l);
        // loop through angular positions
        for(int k=0; k<l; k++){
            cv::Vec3f p = cv::Vec3f(Qcam[k][idx]);
            // point to line distance
            dI[k] = cv::norm((point-p)-(point-p).dot(axis)*axis);
        }
        float sum = std::accumulate(dI.begin(), dI.end(), 0.0);
        float mean = sum / dI.size();
        float meanDev = 0;
        for(int k=0; k<l; k++){
            meanDev += std::abs(dI[k] - mean);
        }
        meanDev /= l;
        error += meanDev;
    }
    error /= mn;
}

void SMCalibrationWorker::performCalibration(std::vector<SMCalibrationSet> calibrationData){

    QSettings settings;

    // Number of saddle points on calibration pattern
    int checkerCountX = settings.value("calibration/patternSizeX", 22).toInt();
    int checkerCountY = settings.value("calibration/patternSizeY", 13).toInt();
    cv::Size checkerCount(checkerCountX, checkerCountY);

    unsigned int nSets = calibrationData.size();

    // 2D Points collected for OpenCV's calibration procedures
    std::vector< std::vector<cv::Point2f> > qc0, qc1;
    std::vector< std::vector<cv::Point2f> > qc0Stereo, qc1Stereo;

    std::vector<bool> success0(nSets), success1(nSets);

    std::vector<float> angles;

    // Loop through calibration sets
    for(unsigned int i=0; i<nSets; i++){

        SMCalibrationSet SMCalibrationSetI = calibrationData[i];

        if(!SMCalibrationSetI.checked)
            continue;

        // Camera 0
        std::vector<cv::Point2f> qci0;

        // Convert to grayscale
        cv::Mat gray;
        if(SMCalibrationSetI.frame0.channels() == 1)
            cv::cvtColor(SMCalibrationSetI.frame0, gray, CV_BayerBG2GRAY);
        else
            cv::cvtColor(SMCalibrationSetI.frame0, gray, CV_RGB2GRAY);

        // Extract checker corners
        success0[i] = cv::findChessboardCorners(gray, checkerCount, qci0, cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_FAST_CHECK);
        if(success0[i]){
            cv::cornerSubPix(gray, qci0, cv::Size(6, 6), cv::Size(1, 1),cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 20, 0.0001));
            // Draw colored chessboard
            cv::Mat color;
            if(SMCalibrationSetI.frame0.channels() == 1)
                cv::cvtColor(SMCalibrationSetI.frame0, color, CV_BayerBG2RGB);
            else
                color = SMCalibrationSetI.frame0.clone();

            cvtools::drawChessboardCorners(color, checkerCount, qci0, success0[i], 10);
            SMCalibrationSetI.frame0Result = color;
        }

        emit newFrameResult(i, 0, success0[i], SMCalibrationSetI.frame0Result);

        // Camera 1
        std::vector<cv::Point2f> qci1;

        // Convert to grayscale
        if(SMCalibrationSetI.frame1.channels() == 1)
            cv::cvtColor(SMCalibrationSetI.frame1, gray, CV_BayerBG2GRAY);
        else
            cv::cvtColor(SMCalibrationSetI.frame1, gray, CV_RGB2GRAY);

        // Extract checker corners
        success1[i] = cv::findChessboardCorners(gray, checkerCount, qci1, cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_FAST_CHECK);
        if(success1[i]){
            cv::cornerSubPix(gray, qci1, cv::Size(6, 6), cv::Size(1, 1),cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 20, 0.0001));
            // Draw colored chessboard
            cv::Mat color;
            if(SMCalibrationSetI.frame1.channels() == 1)
                cv::cvtColor(SMCalibrationSetI.frame1, color, CV_BayerBG2RGB);
            else
                color = SMCalibrationSetI.frame1.clone();

            cvtools::drawChessboardCorners(color, checkerCount, qci1, success1[i], 10);
            SMCalibrationSetI.frame1Result = color;
        }

        emit newFrameResult(i, 1, success1[i], SMCalibrationSetI.frame1Result);

        if(success0[i])
            qc0.push_back(qci0);

        if(success1[i])
            qc1.push_back(qci1);

        if(success0[i] && success1[i]){
            qc0Stereo.push_back(qci0);
            qc1Stereo.push_back(qci1);
            angles.push_back(SMCalibrationSetI.rotationAngle);
        }

        // Show progress
        emit newSetProcessed(i);
    }

    size_t nValidSets = angles.size();
    if(nValidSets < 2){
        std::cerr << "Not enough valid calibration sequences!" << std::endl;
        emit done();
        return;
    }

    // Generate world object coordinates [mm]
    float checkerSize = settings.value("calibration/squareSize", 10.0).toFloat(); // width and height of one checker square in mm
    std::vector<cv::Point3f> Qi;
    for (int h=0; h<checkerCount.height; h++)
        for (int w=0; w<checkerCount.width; w++)
            Qi.push_back(cv::Point3f(checkerSize * w, checkerSize * h, 0.0));

    std::vector< std::vector<cv::Point3f> > Q0, Q1, QStereo;
    for(unsigned int i=0; i<qc0.size(); i++)
        Q0.push_back(Qi);
    for(unsigned int i=0; i<qc1.size(); i++)
        Q1.push_back(Qi);
    for(unsigned int i=0; i<nValidSets; i++)
        QStereo.push_back(Qi);

    // calibrate the cameras
    SMCalibrationParameters cal;
    cal.frameWidth = calibrationData[0].frame0.cols;
    cal.frameHeight = calibrationData[0].frame0.rows;
    cv::Size frameSize(cal.frameWidth, cal.frameHeight);

    // determine only k1, k2 for lens distortion
    int flags = cv::CALIB_FIX_ASPECT_RATIO + cv::CALIB_FIX_K3 + cv::CALIB_ZERO_TANGENT_DIST + cv::CALIB_FIX_PRINCIPAL_POINT;
    // Note: several of the output arguments below must be cv::Mat, otherwise segfault
    std::vector<cv::Mat> cam_rvecs0, cam_tvecs0;
    cal.cam0_error = cv::calibrateCamera(Q0, qc0, frameSize, cal.K0, cal.k0, cam_rvecs0, cam_tvecs0, flags,
                                         cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 100, DBL_EPSILON));
//std::cout << cal.k0 << std::endl;
//    // refine extrinsics for camera 0
//    for(int i=0; i<Q.size(); i++)
//        cv::solvePnPRansac(Q[i], qc0[i], cal.K0, cal.k0, cam_rvecs0[i], cam_tvecs0[i], true, 100, 0.05, 100, cv::noArray(), CV_ITERATIVE);

    std::vector<cv::Mat> cam_rvecs1, cam_tvecs1;
    cal.cam1_error = cv::calibrateCamera(Q1, qc1, frameSize, cal.K1, cal.k1, cam_rvecs1, cam_tvecs1, flags,
                                         cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 100, DBL_EPSILON));
//std::cout << cal.k1 << std::endl;
    // stereo calibration
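    // Intrinsics are kept fixed from the per-camera calibrations (CALIB_FIX_INTRINSIC); only the pose
    // of camera 1 relative to camera 0 (R1, T1) and the essential/fundamental matrices are estimated.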
    int flags_stereo = cv::CALIB_FIX_INTRINSIC;// + cv::CALIB_FIX_K2 + cv::CALIB_FIX_K3 + cv::CALIB_ZERO_TANGENT_DIST + cv::CALIB_FIX_PRINCIPAL_POINT + cv::CALIB_FIX_ASPECT_RATIO;
    cv::Mat E, F, R1, T1;
    cal.stereo_error = cv::stereoCalibrate(QStereo, qc0Stereo, qc1Stereo, cal.K0, cal.k0, cal.K1, cal.k1,
                                              frameSize, R1, T1, E, F,
                                              cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 200, DBL_EPSILON),
                                              flags_stereo);

    cal.R1 = R1;
    cal.T1 = T1;
    cal.E = E;
    cal.F = F;

    // Determine per-view reprojection errors:
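    // (mean Euclidean pixel distance between detected corners and corners reprojected with the
    // estimated intrinsics/extrinsics; views where detection failed are marked NAN)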
    cal.cam0_errors_per_view.resize(nSets);
    int idx = 0;
    for(unsigned int i = 0; i < nSets; ++i){
        if(success0[i]){
            int n = (int)Q0[idx].size();
            std::vector<cv::Point2f> qc_proj;
            cv::projectPoints(cv::Mat(Q0[idx]), cam_rvecs0[idx], cam_tvecs0[idx], cal.K0,  cal.k0, qc_proj);
            float err = 0;
            for(unsigned int j=0; j<qc_proj.size(); j++){
                cv::Point2f d = qc0[idx][j] - qc_proj[j];
                err += cv::sqrt(d.x*d.x + d.y*d.y);
            }
            cal.cam0_errors_per_view[i] = (float)err/n;
            idx++;
        } else
            cal.cam0_errors_per_view[i] = NAN;
    }
    cal.cam1_errors_per_view.resize(nSets);
    idx = 0;
    for(unsigned int i = 0; i < nSets; ++i){
        if(success1[i]){
            int n = (int)Q1[idx].size();
            std::vector<cv::Point2f> qc_proj;
            cv::projectPoints(cv::Mat(Q1[idx]), cam_rvecs1[idx], cam_tvecs1[idx], cal.K1,  cal.k1, qc_proj);
            float err = 0;
            for(unsigned int j=0; j<qc_proj.size(); j++){
                cv::Point2f d = qc1[idx][j] - qc_proj[j];
                err += cv::sqrt(d.x*d.x + d.y*d.y);
            }
            cal.cam1_errors_per_view[i] = (float)err/n;
            idx++;
        } else
            cal.cam1_errors_per_view[i] = NAN;
    }

//    // hand-eye calibration
//    std::vector<cv::Matx33f> Rc(nValidSets - 1); // rotations/translations of the checkerboard in camera 0 reference frame
//    std::vector<cv::Vec3f> Tc(nValidSets - 1);
//    std::vector<cv::Matx33f> Rr(nValidSets - 1); // in rotation stage reference frame
//    std::vector<cv::Vec3f> Tr(nValidSets - 1);
//    for(int i=0; i<nValidSets-1; i++){
//        // relative transformations in camera
//        cv::Mat cRw1, cRw2;
//        cv::Rodrigues(cam_rvecs0[i], cRw1);
//        cv::Rodrigues(cam_rvecs0[i+1], cRw2);
//        cv::Mat cTw1 = cam_tvecs0[i];
//        cv::Mat cTw2 = cam_tvecs0[i+1];
//        cv::Mat w1Rc = cRw1.t();
//        cv::Mat w1Tc = -cRw1.t()*cTw1;
//        Rc[i] = cv::Mat(cRw2*w1Rc);
//        Tc[i] = cv::Mat(cRw2*w1Tc+cTw2);

//        // relative transformations in rotation stage
//        // we define the rotation axis to pass through the origin, pointing in the positive y direction
//        float angleRadians = (angles[i+1]-angles[i])/180.0*M_PI;
//        cv::Vec3f rot_rvec(0.0, -angleRadians, 0.0);
//        cv::Mat Rri;
//        cv::Rodrigues(rot_rvec, Rri);
//        Rr[i] = Rri;
//        Tr[i] = 0.0;

////        std::cout << i << std::endl;
////        std::cout << "cTw1" << cTw1 << std::endl;
////        std::cout << "cTw2" << cTw2 << std::endl;
////        std::cout << "w2Rc" << w2Rc << std::endl;
////        std::cout << "w2Tc" << w2Tc << std::endl;

////        std::cout << "w2Rc" << w2Rc << std::endl;
////        std::cout << "w2Tc" << w2Tc << std::endl;

////        cv::Mat Rci;
////        cv::Rodrigues(Rc[i], Rci);
////        std::cout << "Rci" << Rci << std::endl;
////        std::cout << "Tc[i]" << Tc[i] << std::endl;

////        std::cout << "rot_rvec" << rot_rvec << std::endl;
////        std::cout << "Tr[i]" << Tr[i] << std::endl;
////        std::cout << std::endl;
//    }

//    // determine the transformation from rotation stage to camera 0
//    cvtools::handEyeCalibrationTsai(Rc, Tc, Rr, Tr, cal.Rr, cal.Tr);

//    for(int i=0; i<nValidSets-1; i++){
//        std::cout << i << std::endl;

//        cv::Mat Rci;
//        cv::Rodrigues(Rc[i], Rci);
//        std::cout << "Rc[i]" << Rci << std::endl;
//        std::cout << "Tc[i]" << Tc[i] << std::endl;

//        cv::Mat Rri;
//        cv::Rodrigues(Rr[i], Rri);
//        std::cout << "Rr[i]" << Rri << std::endl;
//        std::cout << "Tr[i]" << Tr[i] << std::endl;

//        cv::Mat Rcr = cv::Mat(cal.Rr)*cv::Mat(Rc[i])*cv::Mat(cal.Rr.t());
//        cv::Rodrigues(Rcr, Rcr);
//        cv::Mat Tcr = -cv::Mat(cal.Rr)*cv::Mat(Rc[i])*cv::Mat(cal.Rr.t())*cv::Mat(cal.Tr) + cv::Mat(cal.Rr)*cv::Mat(Tc[i]) + cv::Mat(cal.Tr);
//        std::cout << "Rcr[i]" << Rcr << std::endl;
//        std::cout << "Tcr[i]" << Tcr << std::endl;
//        std::cout << std::endl;
//    }


    // Direct rotation axis calibration //
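    // Triangulate the checkerboard corners in the camera 0 frame for every valid set from the
    // undistorted (normalized) image coordinates and the stereo extrinsics, then fit the turntable
    // axis to the circular arcs traced by each corner.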
    // full camera matrices
    cv::Matx34f P0 = cv::Matx34f::eye();
    cv::Mat RT1(3, 4, CV_32F);
    cv::Mat(cal.R1).copyTo(RT1(cv::Range(0, 3), cv::Range(0, 3)));
    cv::Mat(cal.T1).copyTo(RT1(cv::Range(0, 3), cv::Range(3, 4)));
    cv::Matx34f P1 = cv::Matx34f(RT1);

    // calibration points in camera 0 frame
    std::vector< std::vector<cv::Point3f> > Qcam;

    for(unsigned int i=0; i<nValidSets; i++){
        std::vector<cv::Point2f> qc0i, qc1i;

        cv::undistortPoints(qc0Stereo[i], qc0i, cal.K0, cal.k0);
        cv::undistortPoints(qc1Stereo[i], qc1i, cal.K1, cal.k1);
//        qc0i = qc0[i];
//        qc1i = qc1[i];

        cv::Mat Qhom, Qcami;
        cv::triangulatePoints(P0, P1, qc0i, qc1i, Qhom);
        cvtools::convertMatFromHomogeneous(Qhom, Qcami);
        std::vector<cv::Point3f> QcamiPoints;
        cvtools::matToPoints3f(Qcami, QcamiPoints);

        Qcam.push_back(QcamiPoints);
    }

//    cv::Mat QcamMat(Qcam.size(), Qcam[0].size(), CV_32FC3);
//    for(int i=0; i<Qcam.size(); i++)
//        for(int j=0; j<Qcam[0].size(); j++)
//            QcamMat.at<cv::Point3f>(i,j) = Qcam[i][j];
//    cvtools::writeMat(QcamMat, "Qcam.mat", "Qcam");

    cv::Vec3f axis, point;
    float rot_axis_error;
    rotationAxisCalibration(Qcam, Qi, axis, point, rot_axis_error);

    // construct transformation matrix
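    // Rotation-stage frame: the y axis coincides with the rotation axis and 'point' is the origin;
    // ex and ez complete a right-handed orthonormal basis. Rr/Tr map camera 0 coordinates into this frame.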
    cv::Vec3f ex = axis.cross(cv::Vec3f(0,0,1.0));
    ex = cv::normalize(ex);
    cv::Vec3f ez = ex.cross(axis);
    ez = cv::normalize(ez);

    cv::Mat RrMat(3, 3, CV_32F);
    cv::Mat(ex).copyTo(RrMat.col(0));
    cv::Mat(axis).copyTo(RrMat.col(1));
    cv::Mat(ez).copyTo(RrMat.col(2));

    cal.Rr = cv::Matx33f(RrMat).t();
    cal.Tr = -cv::Matx33f(RrMat).t()*point;
    cal.rot_axis_error = rot_axis_error;

    // Print to std::cout
    cal.print();

    // save to persistent settings (the QSettings object is reentrant)
    settings.setValue("calibration/parameters", QVariant::fromValue(cal));

    emit done();

}