// GSoC2011SfM 0.1 - Google Summer of Code 2011: Structure from motion
// src/EuclideanEstimator.cpp

#include <pcl/point_types.h>
#include "libmv/multiview/five_point.h"
#include "libmv/multiview/affine.h"
#include "libmv/multiview/fundamental.h"
#include "libmv/multiview/robust_fundamental.h"
#include "libmv/multiview/focal_from_fundamental.h"
#include "opencv2/core/eigen.hpp"

#include <Eigen/Eigenvalues>

#include <pcl/io/vtk_io.h>
#include <sstream>
#include <cmath>

#include "EuclideanEstimator.h"
#include "StructureEstimator.h"
#include "Camera.h"
#include "Visualizer.h"
#include "PCL_mapping.h"
#include "bundle_related.h"

using std::vector;
using std::cout;
using std::endl;
using cv::Ptr;

namespace OpencvSfM{
  //the next two functions are for internal use only; they are not part of the public interface.

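  /** Mean symmetric epipolar distance of the matches (x1, x2) under the
  * fundamental matrix F. Note: despite its name, this helper relies on
  * libmv::SymmetricEpipolarDistance, not on the Sampson approximation.
  */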
  double SampsonDistance2( const libmv::Mat &F,
    const libmv::Mat2X &x1, const libmv::Mat2X &x2 ) {
    double error_total = 0.0;
    unsigned int n_points = x1.cols( );
    for( unsigned int i = 0; i < n_points ; ++i )
      error_total += libmv::SymmetricEpipolarDistance( F, x1.col( i ), x2.col( i ) );

    return error_total / (double)n_points;
  }

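  /** Robust relative-pose estimation from 5-point samples (RANSAC-like):
  * repeatedly draw 5 random correspondences, compute the candidate essential
  * matrices with libmv::FivePointsRelativePose (up to 10 hypotheses per
  * sample), score every hypothesis against ALL matches via SampsonDistance2,
  * and keep the essential matrix E with the lowest mean error.
  * Returns that best (lowest) error.
  */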
  double robust5Points( const libmv::Mat2X &x1, const libmv::Mat2X &x2,
    const libmv::Mat3 &K1, const libmv::Mat3 &K2,
    libmv::Mat3 &E )
  {
    unsigned int nPoints = x1.cols( );
    CV_DbgAssert( nPoints == x2.cols( ) );
    CV_DbgAssert( nPoints >= 5 );//the five-point algorithm needs at least 5 points!

    cv::RNG& rng = cv::theRNG( );
    vector<int> masks( nPoints );
    double best_error = 1e9;

    int num_iter=0, max_iter=MIN( 2500, nPoints*(nPoints-5) );
    for( num_iter=0; num_iter<max_iter; ++num_iter )
    {
      masks.assign( nPoints, 0 );
      int nb_vals=0;
      //choose 5 random points:
      while( nb_vals < 5 )
      {
        int valTmp = rng( nPoints );
        if( masks[ valTmp ] == 0 )
        {
          masks[ valTmp ] = 1;
          nb_vals++;
        }
      }
      //gather the 5 sampled correspondences:
      libmv::Mat2X x1_tmp,x2_tmp;
      x1_tmp.resize( 2,nb_vals );
      x2_tmp.resize( 2,nb_vals );
      nb_vals=0;
      unsigned int i;
      for( i = 0; i<nPoints; ++i )
      {
        if( masks[ i ] != 0 )
        {
          x1_tmp( 0,nb_vals ) = x1( 0,i );
          x1_tmp( 1,nb_vals ) = x1( 1,i );
          x2_tmp( 0,nb_vals ) = x2( 0,i );
          x2_tmp( 1,nb_vals ) = x2( 1,i );
          nb_vals++;
        }
      }
      //FivePointsRelativePose appends its solutions, so start from an empty vector:
      libmv::vector<libmv::Mat3, Eigen::aligned_allocator<libmv::Mat3> > Es;
      libmv::FivePointsRelativePose( x1_tmp,x2_tmp,&Es );
      unsigned int num_hyp = Es.size( );
      for ( i = 0; i < num_hyp; i++ ) {

        libmv::Mat3 F;
        libmv::FundamentalFromEssential( Es[ i ], K1, K2, &F );
        double error = SampsonDistance2( F, x1, x2 );

        if ( best_error > error ) {
          best_error = error;
          E = Es[ i ];
        }
      }
    }
    return best_error;
  }

  EuclideanEstimator::EuclideanEstimator( SequenceAnalyzer &sequence,
    vector<PointOfView>& cameras )
    :sequence_( sequence ),cameras_( cameras )
  {
    vector<PointOfView>::iterator itPoV=cameras.begin( );
    while ( itPoV!=cameras.end( ) )
    {
      addNewPointOfView( *itPoV );
      itPoV++;
    }
    index_origin = 0;
  }

  EuclideanEstimator::~EuclideanEstimator( void )
  {
    //TODO!!!!
  }

  void EuclideanEstimator::addNewPointOfView( const PointOfView& camera )
  {
    libmv::Mat3 intra_param;
    cv::Ptr<Camera> intra=camera.getIntraParameters( );
    //transposed because libmv expects the intra parameters this way...
    cv::cv2eigen( intra->getIntraMatrix( ).t(), intra_param );
    intra_params_.push_back( intra_param );
    libmv::Mat3 rotation_mat;
    cv::cv2eigen( camera.getRotationMatrix( ), rotation_mat );
    rotations_.push_back( rotation_mat );
    libmv::Vec3 translation_vec;
    cv::cv2eigen( camera.getTranslationVector( ), translation_vec );
    translations_.push_back( translation_vec );
    camera_computed_.push_back( false );
  }

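  /** Bundle adjustment over all computed cameras and 3D points, wrapping
  * Lourakis' sba library. Camera extrinsics are parameterized as local
  * increments relative to the current estimate: 3 values for the vector
  * part of a unit quaternion (the scalar part is recovered from the unit
  * norm) plus 3 for the translation update, which is why the parameter
  * vector p is initialized to zero for every camera. Intrinsics are kept
  * fixed (Euclidean reconstruction).
  */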
  void EuclideanEstimator::bundleAdjustement( )
  {
    //wrap Lourakis' SBA:

    unsigned int n = point_computed_.size( ),   // number of points
      ncon = 0,// number of points (starting from the 1st) whose parameters should not be modified.
      m = 0,   // number of images (or cameras)
      mcon = 1,// number of cameras (starting from the 1st) whose parameters should not be modified.
      cnp = 6,// number of parameters for ONE camera; e.g. 6 for Euclidean cameras
      //use only the vector part of the quaternion to enforce the unit length...
      pnp = 3,// number of parameters for ONE 3D point; e.g. 3 for Euclidean points
      mnp = 2;// number of parameters for ONE projected point; e.g. 2 for Euclidean points

    unsigned int i = 0, j = 0,
      nb_cam = camera_computed_.size( );
    vector< Ptr< PointsToTrack > > &points_to_track = sequence_.getPoints( );

    //because some points are sometimes not visible:
    vector<int> idx_cameras;
    idx_cameras.push_back( index_origin );

    std::vector<bool> pointOK;
    int nbPoints = 0;
    for ( j = 0; j < n; ++j )
    {//for each 3D point:
      //test if at least 2 views see this point:
      int nbCam = 0;
      for(size_t k =0; k<nb_cam; ++k)
      {
        if(camera_computed_[ k ] && point_computed_[ j ].containImage( k ))
          nbCam++;
      }
      pointOK.push_back( nbCam>=2 );
      if(pointOK[j])
        nbPoints++;
    }

    int nz_count = 0;
    for ( i = 0; i < nb_cam; ++i )
    {//for each camera:
      if( camera_computed_[ i ] )
      {
        if( i!=index_origin )//index_origin is already added...
          idx_cameras.push_back(i);
        m++;//increment the camera count

        int nb_projection = 0;
        for ( j = 0; j < n; ++j )
        {//for each 3D point:
          if( pointOK[j])
          {
            if( point_computed_[ j ].containImage( i ) )
              nb_projection++;
          }
        }
        nz_count += nb_projection;
      }
    }
    n=nbPoints;

    //2D points:
    char *vmask = new char[ n*m ];//visibility mask: vmask[i, j]=1 if point i is visible in image j, 0 otherwise.
    double *p = new double[m*cnp + n*pnp];//initial parameter vector p0: (a1, ..., am, b1, ..., bn).
                   // aj are the image j parameters, bi are the i-th point parameters

    double *x = new double[ 2*nz_count ];// measurement vector: (x_11^T, .. x_1m^T, ..., x_n1^T, .. x_nm^T)^T where
                   // x_ij is the projection of the i-th point on the j-th image.
                   // NOTE: some of the x_ij might be missing, if point i is not visible in image j;
                   // see vmask[i, j], max. size n*m*mnp
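    // Illustrative layout (hypothetical example with n = 2 points, m = 3 cameras):
    //   vmask = [ v(0,0) v(0,1) v(0,2) | v(1,0) v(1,1) v(1,2) ]   i.e. vmask[ j*m + i ]
    //   p     = [ cam0(6 params) cam1(6) cam2(6) | pt0(3) pt1(3) ]
    //   x     = the visible 2D measurements only, in the same point-major order as vmask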

    libmv::vector< Eigen::Quaterniond > init_rotation;
    libmv::vector< libmv::Vec3 > init_translat;
    libmv::vector< libmv::Mat3 > intra_p;
    libmv::vector< int > idx_intra;
    //update each variable:
    int idx_visible = 0;
    double *p_local = p;
    for ( i=0; i < m; ++i )
    {//for each camera:
      int idx_cam = idx_cameras[i];
      idx_intra.push_back( i );//indeed, every camera has its own intra parameters...
      intra_p.push_back( intra_params_[idx_cam] );
      //extrinsic parameters only (intra are known in Euclidean reconstruction)
      init_rotation.push_back( (Eigen::Quaterniond)rotations_[ idx_cam ] );
      init_translat.push_back( translations_[ idx_cam ] );
      //add camera parameters to p:
      //as this is a rotation, the quaternion's length is unity; only 3 values are needed.
      //The 4th value equals:
      //sqrt(1.0 - quat[0]*quat[0] - quat[1]*quat[1] - quat[2]*quat[2]));
      //start from a zero increment on rotation and translation:

      p_local[0] = 0; p_local[1] = 0; p_local[2] = 0;

      p_local[3] = 0; p_local[4] = 0; p_local[5] = 0;

      p_local+=cnp;
    }

    //now add the projections and 3D points:
    idx_visible = 0;
    int j_real = 0;
    for ( j = 0; j < point_computed_.size(); ++j )
    {//for each 3D point:
      if( pointOK[j])
      {
        for ( i=0; i < m; ++i )
        {//for each camera:
          int idx_cam = idx_cameras[i];
          vmask[ i+j_real*m ] = point_computed_[ j ].containImage( idx_cam );
          if( vmask[ i+j_real*m ] )
          {
            cv::KeyPoint pt = points_to_track[ idx_cam ]->getKeypoint(
              point_computed_[ j ].getPointIndex( idx_cam ) );
            x[ idx_visible++ ] = pt.pt.x;
            x[ idx_visible++ ] = pt.pt.y;
          }
        }
        j_real++;
      }
    }
    double* points3D_values = p_local;
    for ( j = 0; j < point_computed_.size(); ++j )
    {//for each 3D point:
      if( pointOK[j])
      {
        cv::Ptr<cv::Vec3d> cv3DPoint = point_computed_[ j ].get3DPosition();
        if(cv3DPoint.empty())
        {
          *(p_local++) = 0;
          *(p_local++) = 0;
          *(p_local++) = 0;
        }
        else
        {
          *(p_local++) = (*cv3DPoint)[ 0 ];
          *(p_local++) = (*cv3DPoint)[ 1 ];
          *(p_local++) = (*cv3DPoint)[ 2 ];
        }
      }
    }

    bundle_datas data(idx_intra,intra_p,init_rotation,init_translat,
      cnp, pnp, mnp,ncon, mcon);
    data.points3D = points3D_values;

//#define PRINT_DEBUG
#ifdef PRINT_DEBUG
    //Debug: compare projected points vs estimated points:
    idx_visible = 0;
    double sum_sq_error = 0;
    double sum_depth = 0;
    j_real = 0;
    for ( j = 0; j < point_computed_.size(); ++j )
    {//for each 3D point:
      if( pointOK[j])
      {
        for ( i=0; i < m; ++i )
        {//for each camera:
          //2D projected points
          if( vmask[ i+j_real*m ] )
          {
            cv::Vec3d cv3DPoint = point_computed_[ j ];//copy: avoids binding a reference to a temporary
            //cout<<"Vec3d : "<< cv3DPoint[ 0 ]<<", "<< cv3DPoint[ 1 ]<<", "<< cv3DPoint[ 2 ]<<endl;
            int idx_cam = idx_cameras[i];
            cv::KeyPoint pt = points_to_track[ idx_cam ]->getKeypoint(
              point_computed_[ j ].getPointIndex( idx_cam ) );
            cv::Vec2d proj = cameras_[ idx_cam ].project3DPointIntoImage(cv3DPoint);/*
            cout<<pt.pt.x<<","<<pt.pt.y<<" -> ";
            cout<<proj[0]<<","<<proj[1]<<endl;*/
            sum_sq_error += (pt.pt.x - proj[0])*(pt.pt.x - proj[0]) +
              (pt.pt.y - proj[1])*(pt.pt.y - proj[1]);

            libmv::Vec3 X(cv3DPoint[0],cv3DPoint[1],cv3DPoint[2]);
            sum_depth += std::abs( (rotations_[ idx_cam ]*X)(2) + translations_[ idx_cam ](2) );
          }
        }
        j_real++;
      }
      //system("pause");
    }
#endif

    //TUNING PARAMETERS:
    int itmax = 10000;        //max iterations
    int verbose = 1;
    double opts[SBA_OPTSSZ] = {
      0.00001,          //Tau
      1e-20,            //E1
      1e-20,            //E2
      0,                //E3 average reprojection error
      0         //E4 relative reduction in the RMS reprojection error
    };

    double info[SBA_INFOSZ];

    //use the sba library:
    int iter = sba_motstr_levmar_x(n, ncon, m, mcon, vmask, p, cnp, pnp, x, NULL, mnp,
        img_projsRTS_x, img_projsRTS_jac_x, (void*)&data, itmax, verbose, opts, info);

    std::cout<<"SBA ("<<nz_count<<") returned in "<<iter<<" iter, reason "<<info[6]
      <<", error "<<info[1]<<" [initial "<< info[0]<<"]\n";
    if(iter>1)
    {
      //set new values:
      m = idx_cameras.size();
      n = point_computed_.size( );
      idx_visible = 0;
      p_local = p;
      for ( i=0; i < m; ++i )
      {//for each camera:
        int idx_cam = idx_cameras[i];
        //extrinsic parameters only (intra are known in Euclidean reconstruction)

        Eigen::Quaterniond rot_init = data.rotations[i];
        double c1 = p_local[0];
        double c2 = p_local[1];
        double c3 = p_local[2];
        double coef=(1.0 - c1*c1 - c2*c2 - c3*c3 );
        if( coef>0 )
          coef = sqrt( coef );
        else//problem with this rotation...
        {
          coef = 0;
          Eigen::Quaterniond quat_tmp( coef, c1, c2, c3 );
          quat_tmp.normalize();
          c1=quat_tmp.x(); c2=quat_tmp.y(); c3=quat_tmp.z();
          coef = quat_tmp.w();
        }

        Eigen::Quaterniond quat_delta( coef, c1, c2, c3 );
        Eigen::Quaterniond rot_total = quat_delta * rot_init;
        //apply the rotation increment:
        rotations_[ idx_cam ] = rot_total.toRotationMatrix();

        translations_[ idx_cam ](0) += p_local[3];
        translations_[ idx_cam ](1) += p_local[4];
        translations_[ idx_cam ](2) += p_local[5];

        //update the camera's structure:
        cv::Mat newRotation,newTranslation;
        cv::eigen2cv( rotations_[ idx_cam ], newRotation );
        cv::eigen2cv( translations_[ idx_cam ], newTranslation );
        cameras_[ idx_cam ].setRotationMatrix( newRotation );
        cameras_[ idx_cam ].setTranslationVector( newTranslation );

        p_local+=cnp;
      }
      for ( j = 0; j < point_computed_.size(); ++j )
      {//for each 3D point:
        if( pointOK[j])
        {
          cv::Vec3d cv3DPoint;
          cv3DPoint[ 0 ] = *(p_local++);
          cv3DPoint[ 1 ] = *(p_local++);
          cv3DPoint[ 2 ] = *(p_local++);
          point_computed_[ j ].set3DPosition( cv3DPoint );
        }
      }

#ifdef PRINT_DEBUG
      //Debug: compare projected points vs estimated points after optimization:
      idx_visible = 0;
      //2D projected points
      double sum_sq_error_after = 0;
      double sum_depth_after = 0;
      j_real = 0;
      for ( j = 0; j < n; ++j )
      {//for each 3D point:
        if( pointOK[j] )
        {
          for ( i=0; i < m; ++i )
          {//for each camera:
            if( vmask[ i+j_real*m ] )
            {
              cv::Vec3d cv3DPoint = point_computed_[ j ];//copy: avoids binding a reference to a temporary
              //cout<<"Vec3d : "<< cv3DPoint[ 0 ]<<", "<< cv3DPoint[ 1 ]<<", "<< cv3DPoint[ 2 ]<<endl;
              int idx_cam = idx_cameras[i];
              cv::KeyPoint pt = points_to_track[ idx_cam ]->getKeypoint(
                point_computed_[ j ].getPointIndex( idx_cam ) );
              cv::Vec2d proj = cameras_[ idx_cam ].project3DPointIntoImage(cv3DPoint);/*
              cout<<pt.pt.x<<","<<pt.pt.y<<" -> ";
              cout<<proj[0]<<","<<proj[1]<<endl;*/
              sum_sq_error_after += (pt.pt.x - proj[0])*(pt.pt.x - proj[0]) +
                (pt.pt.y - proj[1])*(pt.pt.y - proj[1]);
              libmv::Vec3 X(cv3DPoint[0],cv3DPoint[1],cv3DPoint[2]);
              sum_depth_after += std::abs( (rotations_[ idx_cam ]*X)(2) + translations_[ idx_cam ](2) );
            }
          }
          j_real++;
        }
        //system("pause");
      }
      cout<< (sum_sq_error)<<"  ; "<<(sum_sq_error_after)<<endl;
      cout<< (sum_depth)<<"  ; "<<(sum_depth_after)<<endl;
#endif

    }

    delete [] vmask;//visibility mask
    delete [] p;//initial parameter vector p0: (a1, ..., am, b1, ..., bn).
    delete [] x;// measurement vector
  }

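  /** Camera resection: estimate the pose of `image` from already-triangulated
  * tracks that are visible in it. All previously computed cameras are passed
  * to sba as constant; only the new camera's 6 extrinsic parameters are
  * refined, and the 3D points stay fixed (sba_mot_levmar_x). The resection
  * is rejected when the mean residual exceeds max_reprojection.
  */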
  bool EuclideanEstimator::cameraResection( unsigned int image, int max_reprojection )
  {
    //wrap Lourakis' SBA:
    cout<<"resection"<<endl;
    unsigned int n = point_computed_.size( ),   // number of points
      m = 0,   // number of images (or cameras)
      mcon = 0,// number of images (starting from the 1st) whose parameters should not be modified.
      cnp = 6,// number of parameters for ONE camera; e.g. 6 for Euclidean cameras
      //use only the vector part of the quaternion to enforce the unit length...
      mnp = 2;// number of parameters for ONE projected point; e.g. 2 for Euclidean points

    unsigned int i = 0, j = 0,
      nb_cam = camera_computed_.size( );
    vector< Ptr< PointsToTrack > > &points_to_track = sequence_.getPoints( );
    vector< TrackOfPoints > real_track;
    //keep only the tracks seen by this image:

    for(i=0; i<n; i++)
      if( point_computed_[i].containImage(image) )
        real_track.push_back(point_computed_[i]);
    n = real_track.size();

    //now, for fun, show the sequence on images:
    //sequence_.showTracks( image, real_track );

    //because some points are sometimes not visible:
    vector<int> idx_cameras;
    libmv::vector< libmv::Mat3 > intra_p;
    int nz_count = 0;
    for ( i = 0; i < nb_cam; ++i )
    {//for each camera:
      if( camera_computed_[ i ] && i!=image )
      {
        idx_cameras.push_back(i);
        m++;//increment the camera count

        int nb_projection = 0;
        for ( j = 0; j < n; ++j )
        {//for each 3D point:
          if( real_track[ j ].containImage( i ) )
            nb_projection++;
        }
        nz_count += nb_projection;
      }
    }

    mcon = m;//the other cameras are constant!

    idx_cameras.push_back(image);
    m++;//increment the camera count

    int nb_projection = 0;
    for ( j = 0; j < n; ++j )
    {//for each 3D point:
      if( real_track[ j ].containImage( image ) )
        nb_projection++;
    }
    nz_count += nb_projection;

    //2D points:
    char *vmask = new char[ n*m ];//visibility mask: vmask[i, j]=1 if point i is visible in image j, 0 otherwise.
    double *p = new double[m*cnp + n*3];//initial parameter vector p0: (a1, ..., am, b1, ..., bn).
    // aj are the image j parameters, bi are the i-th point parameters

    double *x = new double[ 2*nz_count ];// measurement vector: (x_11^T, .. x_1m^T, ..., x_n1^T, .. x_nm^T)^T where
    // x_ij is the projection of the i-th point on the j-th image.
    // NOTE: some of the x_ij might be missing, if point i is not visible in image j;
    // see vmask[i, j], max. size n*m*mnp

    libmv::vector< Eigen::Quaterniond > init_rotation;
    libmv::vector< libmv::Vec3 > init_translat;
    libmv::vector< int > idx_intra;
    //update each variable:
    double *p_local = p;
    for ( i=0; i < m; ++i )
    {//for each camera:
      int idx_cam = idx_cameras[i];
      idx_intra.push_back( i );
      intra_p.push_back( intra_params_[idx_cam] );
      //extrinsic parameters only (intra are known in Euclidean reconstruction)
      init_rotation.push_back( (Eigen::Quaterniond)rotations_[ idx_cam ] );
      init_translat.push_back( translations_[ idx_cam ] );
      //add camera parameters to p:
      //as this is a rotation, the quaternion's length is unity; only 3 values are needed.
      //The 4th value equals:
      //sqrt(1.0 - quat[0]*quat[0] - quat[1]*quat[1] - quat[2]*quat[2]));
      //start from a zero increment on rotation and translation:

      p_local[0] = 0; p_local[1] = 0; p_local[2] = 0;

      p_local[3] = 0; p_local[4] = 0; p_local[5] = 0;

      p_local+=cnp;
    }

    //now add the projections and 3D points:
    unsigned int idx_visible = 0;
    for ( j = 0; j < n; ++j )
    {//for each 3D point:
      for ( i=0; i < m; ++i )
      {//for each camera:
        int idx_cam = idx_cameras[i];
        vmask[ i+j*m ] = real_track[ j ].containImage( idx_cam );
        if( vmask[ i+j*m ] )
        {
          cv::KeyPoint pt = points_to_track[ idx_cam ]->getKeypoint(
            real_track[ j ].getPointIndex( idx_cam ) );
          x[ idx_visible++ ] = pt.pt.x;
          x[ idx_visible++ ] = pt.pt.y;
        }
      }
    }
    double* points3D_values = p_local;
    for ( j = 0; j < n; ++j )
    {//for each 3D point:
      cv::Vec3d cv3DPoint = real_track[ j ];
      *(p_local++) = cv3DPoint[ 0 ];
      *(p_local++) = cv3DPoint[ 1 ];
      *(p_local++) = cv3DPoint[ 2 ];
    }

    //TUNING PARAMETERS:
    int itmax = 1000;        //max iterations
    int verbose = 0;         //no debug output
    double opts[SBA_OPTSSZ] = {
      0.1,              //Tau
      1e-12,            //E1
      1e-12,            //E2
      0,                //E3 average reprojection error
      0         //E4 relative reduction in the RMS reprojection error
    };

    double info[SBA_INFOSZ];
    bundle_datas data(idx_intra,intra_p,init_rotation, init_translat,
      cnp, 3, mnp, 0, mcon);
    data.points3D = points3D_values;
    //use the sba library (motion only: the 3D points stay fixed):
    int iter = sba_mot_levmar_x(n, m, mcon, vmask, p, cnp, x, NULL, mnp,
      img_projsRT_x, NULL, (void*)&data, itmax, verbose, opts, info);

    bool resection_ok = true;
    if( ( iter<=0 ) || (info[1]/nz_count)>max_reprojection )
    {
      resection_ok = false;
      std::cout<<"resection rejected ("<<nz_count<<") : "<<info[1]/nz_count<<std::endl;
    }
    else
    {
      std::cout<<"SBA ("<<nz_count<<") returned in "<<iter<<" iter, reason "<<info[6]
        <<", error "<<info[1]/nz_count<<" [initial "<< info[0]/nz_count<<"]\n";

      //set new values:
      m = idx_cameras.size();
      n = real_track.size( );
      idx_visible = 0;
      p_local = p + cnp*(m-1);//parameters of the newly resected camera (the last one)

      //extrinsic parameters only (intra are known in Euclidean reconstruction)
      Eigen::Quaterniond rot_init = data.rotations[ m-1 ];
      double c1 = p_local[0];
      double c2 = p_local[1];
      double c3 = p_local[2];
      double coef=(1.0 - c1*c1 - c2*c2 - c3*c3 );
      if( coef>0 )
        coef = sqrt( coef );
      else//problem with this rotation...
      {
        coef = 0;
        Eigen::Quaterniond quat_tmp( coef, c1, c2, c3 );
        quat_tmp.normalize();
        c1=quat_tmp.x(); c2=quat_tmp.y(); c3=quat_tmp.z();
        coef = quat_tmp.w();
      }

      Eigen::Quaterniond quat_delta( coef, c1, c2, c3 );
      Eigen::Quaterniond rot_total = quat_delta * rot_init;
      //apply the rotation increment:
      rotations_[ image ] = rot_total.toRotationMatrix();

      translations_[ image ](0) += p_local[3];
      translations_[ image ](1) += p_local[4];
      translations_[ image ](2) += p_local[5];

      //update the camera's structure:
      cv::Mat newRotation,newTranslation;
      cv::eigen2cv( rotations_[ image ], newRotation );
      cv::eigen2cv( translations_[ image ], newTranslation );
      cameras_[ image ].setRotationMatrix( newRotation );
      cameras_[ image ].setTranslationVector( newTranslation );

      camera_computed_[ image ] = true;
    }

    delete [] vmask;//visibility mask
    delete [] p;//initial parameter vector p0: (a1, ..., am, b1, ..., bn).
    delete [] x;// measurement vector

    return resection_ok;
  }

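  /** Initial two-view reconstruction: collect the tracks shared by image1 and
  * image2, normalize the keypoints with the known intrinsics, estimate the
  * essential matrix with robust5Points, recover the relative motion (R, t)
  * and chain it onto the pose of image1, then triangulate the shared tracks.
  */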
  void EuclideanEstimator::initialReconstruction( int image1, int image2 )
  {
    vector<TrackOfPoints>& tracks = sequence_.getTracks( );
    camera_computed_[ image1 ] = true;
    index_origin = image1;

    vector< Ptr< PointsToTrack > > &points_to_track = sequence_.getPoints( );
    libmv::Mat3 E;
    Ptr<PointsToTrack> point_img1 = points_to_track[ image1 ];
    Ptr<PointsToTrack> point_img2 = points_to_track[ image2 ];
    //first extract the point matches:
    libmv::Mat2X x1,x2;
    //for each point:
    unsigned int key_size = tracks.size( );
    unsigned int i;
    vector<TrackOfPoints> matches;

    for ( i=0; i < key_size; ++i )
    {
      TrackOfPoints &track = tracks[ i ];
      if( track.containImage( image1 ) && track.containImage( image2 ) )
        matches.push_back( track );
    }
    x1.resize( 2,matches.size( ) );
    x2.resize( 2,matches.size( ) );

    key_size = matches.size( );
    vector<cv::Vec2d> pointImg1,pointImg2;
    for ( i=0; i < key_size; ++i )
    {
      TrackOfPoints &track = matches[ i ];
      cv::DMatch match = track.toDMatch( image1, image2 );

      pointImg1.push_back( cv::Vec2d( point_img1->getKeypoint( match.trainIdx ).pt.x,
        point_img1->getKeypoint( match.trainIdx ).pt.y ) );
      pointImg2.push_back( cv::Vec2d( point_img2->getKeypoint( match.queryIdx ).pt.x,
        point_img2->getKeypoint( match.queryIdx ).pt.y ) );
    }
    vector<cv::Vec2d> pointNorm1 = cameras_[ image1 ].getIntraParameters( )->
      pixelToNormImageCoordinates( pointImg1 );
    vector<cv::Vec2d> pointNorm2 = cameras_[ image2 ].getIntraParameters( )->
      pixelToNormImageCoordinates( pointImg2 );
    key_size = pointNorm1.size( );
    for ( i=0; i < key_size; ++i )
    {
      x1( 0,i ) = -pointNorm1[ i ][ 0 ];
      x1( 1,i ) = -pointNorm1[ i ][ 1 ];
      x2( 0,i ) = -pointNorm2[ i ][ 0 ];
      x2( 1,i ) = -pointNorm2[ i ][ 1 ];
    }

    double error = robust5Points( x1, x2,
      intra_params_[ image1 ], intra_params_[ image2 ], E );

    //std::cout<<"E: "<<E<<std::endl;
    std::cout<<"best five-point error: "<<error<<std::endl;

    //from this essential matrix, extract the relative motion:
    libmv::Mat3 R;
    libmv::Vec3 t;
    libmv::Vec2 x1Col, x2Col;
    x1Col << x1( 0,0 ), x1( 1,0 );
    x2Col << x2( 0,0 ), x2( 1,0 );
    bool ok = libmv::MotionFromEssentialAndCorrespondence( E,
      intra_params_[ image1 ], x1Col,
      intra_params_[ image2 ], x2Col,
      &R, &t );
    if( !ok )
      std::cout<<"MotionFromEssentialAndCorrespondence failed!"<<std::endl;

    rotations_[ image2 ] = R * rotations_[ image1 ];
    translations_[ image2 ] = t + R * translations_[ image1 ];

    //update the camera's structure:
    cv::Mat newRotation,newTranslation;
    cv::eigen2cv( rotations_[ image2 ], newRotation );
    cv::eigen2cv( translations_[ image2 ], newTranslation );
    cameras_[ image2 ].setRotationMatrix( newRotation );
    cameras_[ image2 ].setTranslationVector( newTranslation );

    //this camera is now computed:
    camera_computed_[ image2 ] = true;

    //triangulate the points:
    StructureEstimator se( &sequence_, &this->cameras_ );
    vector<int> images_to_compute;
    images_to_compute.push_back( image1 );
    images_to_compute.push_back( image2 );
    point_computed_ = se.computeStructure( images_to_compute );
    //bundleAdjustement();
  }

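  /** Detect and match additional keypoints between img1 and img2 with the
  * given detector/extractor, merge them with the already-tracked keypoints
  * (reusing an existing keypoint when one lies within a 2-pixel L1 distance),
  * then re-triangulate the structure from all computed cameras.
  */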
  void EuclideanEstimator::addMoreMatches(int img1, int img2,
    std::string detect, std::string extractor )
  {
    std::string methodMatch = "FlannBased";
    if( extractor.find("ORB")!=std::string::npos ||
      extractor.find("BRIEF")!=std::string::npos)
      methodMatch = "BruteForce-Hamming";//binary descriptors need a Hamming matcher
    Ptr<PointsMatcher> point_matcher =
      new PointsMatcher( cv::DescriptorMatcher::create( methodMatch ) );

    Ptr<PointsToTrack> pointCollection = Ptr<PointsToTrack>(
      new PointsToTrackWithImage( img1, sequence_.getImage(img1), detect, extractor ));
    Ptr<PointsToTrack> pointCollection1 = Ptr<PointsToTrack>(
      new PointsToTrackWithImage( img2, sequence_.getImage(img2), detect, extractor ));
    pointCollection->computeKeypointsAndDesc( true );
    point_matcher->add( pointCollection );
    point_matcher->train();

    pointCollection1->computeKeypointsAndDesc( true );
    Ptr<PointsMatcher> point_matcher1 =
      new PointsMatcher( cv::DescriptorMatcher::create( methodMatch ) );
    point_matcher1->add( pointCollection1 );
    point_matcher1->train( );

    vector< cv::DMatch > matches_i_j =
      SequenceAnalyzer::simple_matching(point_matcher, point_matcher1 );
    //the matches don't use the same indices... set the correct ones:
    vector<cv::KeyPoint>& kpImg1 =
      sequence_.getPointsToTrack()[img1]->getModifiableKeypoints();
    vector<cv::KeyPoint>& kpImg2 =
      sequence_.getPointsToTrack()[img2]->getModifiableKeypoints();
    int point_added = 0;
    size_t nbK1 = kpImg1.size(), nbK2 = kpImg2.size(), cpt1;
    for(size_t cpt=0; cpt<matches_i_j.size(); ++cpt)
    {
      const cv::KeyPoint &key1 = point_matcher->getKeypoint(
        matches_i_j[ cpt ].trainIdx );
      const cv::KeyPoint &key2 = point_matcher1->getKeypoint(
        matches_i_j[ cpt ].queryIdx );
      //now look for a close keypoint:
      //first in img1:
      bool found = false;
      for(cpt1 = 0; cpt1<nbK1 && !found; ++cpt1)
        found = (std::abs(kpImg1[cpt1].pt.x - key1.pt.x) +
        std::abs(kpImg1[cpt1].pt.y - key1.pt.y) ) < 2;
      if(found)
        matches_i_j[ cpt ].trainIdx = --cpt1;//the loop went one step past the match
      else
      {
        matches_i_j[ cpt ].trainIdx = nbK1++;
        kpImg1.push_back( key1 );
        point_added++;
      }

      //then in img2:
      found = false;
      for(cpt1 = 0; cpt1<nbK2 && !found; ++cpt1)
        found = (std::abs(kpImg2[cpt1].pt.x - key2.pt.x) +
        std::abs(kpImg2[cpt1].pt.y - key2.pt.y) ) < 2;
      if(found)
        matches_i_j[ cpt ].queryIdx = --cpt1;//the loop went one step past the match
      else
      {
        matches_i_j[ cpt ].queryIdx = nbK2++;
        kpImg2.push_back( key2 );
        point_added++;
      }
    }
    sequence_.addMatches(matches_i_j,img1,img2);

    SequenceAnalyzer::keepOnlyCorrectMatches( sequence_, 2, 0 );

    //triangulate the points:
    StructureEstimator se( &sequence_, &this->cameras_ );

    vector<int> images_to_compute;
    unsigned int key_size = camera_computed_.size( );
    for (unsigned int i=0; i < key_size; ++i )
    {
      if( camera_computed_[ i ] )
        images_to_compute.push_back( i );
    }

    point_computed_ = se.computeStructure( images_to_compute, 2 );
    //se.removeOutliersTracks( 5, &point_computed_ );
    //SequenceAnalyzer::keepOnlyCorrectMatches(point_computed_,2,0);
  }

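  /** Full incremental reconstruction: bootstrap from a strong image pair,
  * then repeatedly resect the cameras linked to the already-computed views,
  * re-triangulate, and bundle-adjust.
  *
  * Typical usage (sketch; assumes a filled SequenceAnalyzer and initial
  * camera guesses, names as used elsewhere in this project):
  *   EuclideanEstimator estimator( sequence, cameras );
  *   estimator.computeReconstruction( );
  *   estimator.viewEstimation( true );//show the colored point cloud
  */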
  void EuclideanEstimator::computeReconstruction( )
  {
    vector<TrackOfPoints>& tracks = sequence_.getTracks( );
    vector< Ptr< PointsToTrack > > &points_to_track = sequence_.getPoints( );
    ImagesGraphConnection &images_graph = sequence_.getImgGraph( );
    //double ransac_threshold = 0.4 * sequence_.getImage( 0 ).rows / 100.0;
    double ransac_threshold = 3.0;
    //now create the graph:

    int img1,img2;
    int nbMatches = images_graph.getHighestLink( img1,img2 );
    vector<ImageLink> bestMatches;
    images_graph.getOrderedLinks( bestMatches, nbMatches*0.8 );
    double min_inliers=1e7;
    int index_of_min=0;/*
    cv::Mat minFundamental;
    for( unsigned int cpt=0;cpt<bestMatches.size( );cpt++ )
    {
      //construct the homography and choose the pair with the fewest homography
      //inliers, i.e. the best-conditioned pair for two-view geometry
      //( see Snavely, "Modeling the World from Internet Photo Collections" )
      std::vector<cv::Point2f> pointsImg1, pointsImg2;
      vector<uchar> status;
      points_to_track[ bestMatches[ cpt ].imgSrc ]->getKeyMatches( tracks,
        bestMatches[ cpt ].imgDest, pointsImg1 );
      points_to_track[ bestMatches[ cpt ].imgDest ]->getKeyMatches( tracks,
        bestMatches[ cpt ].imgSrc, pointsImg2 );

      //compute the homography:
      cv::findHomography( pointsImg1,pointsImg2,status,CV_RANSAC,
        ransac_threshold );
      //count the inlier points:
      double inliers=0;
      for( unsigned int i=0;i<status.size( );++i )
      {
        if( status[ i ] != 0 )
          inliers++;
      }
      double percent_inlier = inliers/static_cast<double>( pointsImg1.size( ) );
      if( percent_inlier < min_inliers )
      {
        min_inliers = percent_inlier;
        index_of_min = cpt;
        minFundamental = cv::findFundamentalMat( pointsImg1, pointsImg2,
          status, cv::FM_RANSAC );
      }
    }
    //we will start the reconstruction using bestMatches[ index_of_min ]
    //to avoid degenerate cases such as coincident cameras*/

    vector<int> images_computed;
    int iter = 0;
    int nbMax = 0, bestId = 0;/*
    do
    {
      bool not_correct;
      img1 = bestMatches[ index_of_min ].imgSrc;
      img2 = bestMatches[ index_of_min ].imgDest;
      cout<<img1<<", "<<img2<<endl;
      //sequence_.showTracksBetween(img1, img2);

      images_computed.clear();
      images_computed.push_back( img1 );
      images_computed.push_back( img2 );
      camera_computed_[ img1 ] = true;
      initialReconstruction( img1, img2 );

      //try to find more matches:
      std::vector< TrackOfPoints > point_before = point_computed_;
      cout<<point_computed_.size()<<endl;

      not_correct = point_computed_.size() < nbMatches/3 ||
        point_computed_.size() < point_before.size()-20;
      if( point_computed_.size()>nbMax )
      {
        nbMax = point_computed_.size();
        bestId = index_of_min;
      }
      point_computed_.clear();
      camera_computed_[ img1 ] = false;
      camera_computed_[ img2 ] = false;
      index_of_min++;
    } while ( index_of_min<bestMatches.size() );*/
    bestId = bestMatches.size() - 1;
    img1 = bestMatches[ bestId ].imgSrc;
    img2 = bestMatches[ bestId ].imgDest;
    cout<<img1<<", "<<img2<<endl;
    //sequence_.showTracksBetween(img1, img2);

    images_computed.clear();
    images_computed.push_back( img1 );
    images_computed.push_back( img2 );
    camera_computed_[ img1 ] = true;
    initialReconstruction( img1, img2 );

    //try to find more matches:
    std::vector< TrackOfPoints > point_before = point_computed_;
    cout<<"before: "<<point_computed_.size()<<endl;
    addMoreMatches( img1, img2 );
    cout<<"after: "<<point_computed_.size()<<endl;
    if( point_before.size() > point_computed_.size() )
      point_computed_ = point_before;

    //bundleAdjustement();

    //now we have updated the position of the camera which takes img2,
    //and a 3D estimation from these 2 first cameras...
    //look for the other camera positions:
    vector<ImageLink> images_close;
    int nbIter = 0;
    while( nbMatches>10 && images_computed.size()<cameras_.size() && nbIter<20 )
    {
      nbIter++;
      images_close.clear( );
      while ( images_close.size( ) < 2 && nbMatches>0 )
      {
        for(size_t cpt = 0; cpt<images_computed.size(); ++cpt )
        {
          images_graph.getImagesRelatedTo( images_computed[ cpt ],
            images_close, nbMatches * 0.8 - 10 );
          //remove from images_close the links whose cameras are both computed:
          for(size_t cpt1 = 0; cpt1<images_close.size(); ++cpt1 )
          {
            if( camera_computed_[ images_close[cpt1].imgDest ] &&
              camera_computed_[ images_close[cpt1].imgSrc ] )
            {
              images_close[ cpt1 ] = images_close[ images_close.size() - 1 ];
              images_close.pop_back();
              cpt1--;
            }
          }
        }
        nbMatches = nbMatches * 0.9;
        if(nbMatches<0)
          nbMatches=0;
      }

      //for each image, compute the camera position:
      for( size_t cpt=0;cpt<images_close.size( );cpt++ )
      {
        //we don't want to compute the same camera position twice:
        int new_id_image = -1,
          old_id_image = -1;
        if( !camera_computed_[ images_close[ cpt ].imgSrc ] )
        {
          new_id_image = images_close[ cpt ].imgSrc;
          old_id_image = images_close[ cpt ].imgDest;
        }
        else
        {
          new_id_image = images_close[ cpt ].imgDest;
          old_id_image = images_close[ cpt ].imgSrc;
        }

        if( new_id_image >= 0 )
        {
          initialReconstruction( old_id_image, new_id_image );
          if( cameraResection( new_id_image, 50*(nbIter/4.0+1.0) ) )
          {
            images_computed.push_back( new_id_image );

            std::vector< TrackOfPoints > point_before = point_computed_;
            cout<<"before: "<<point_computed_.size()<<endl;
            //addMoreMatches( old_id_image, new_id_image );
            cout<<"after: "<<point_computed_.size()<<endl;
            if( point_computed_.size() < point_before.size() )
              point_computed_ = point_before;

            //triangulate the points:
            StructureEstimator se( &sequence_, &this->cameras_ );
            point_computed_ = se.computeStructure( images_computed, 2 );
          }
          else
          {
            camera_computed_[ new_id_image ] = false;
          }
        }
      }
      //perform a bundle adjustment:
      bundleAdjustement();
    }//*/

    //triangulate the points:
    StructureEstimator se( &sequence_, &this->cameras_ );
    point_computed_ = se.computeStructure( images_computed );
    se.removeOutliersTracks( 2 );
  }

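  /** Display the estimated 3D points (optionally with their track colors)
  * and every camera in the PCL-based debug viewer.
  */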
  void EuclideanEstimator::viewEstimation( bool coloredPoints )
  {
    vector<cv::Vec3d> tracks3D;
    vector< unsigned int > colors;
    vector<TrackOfPoints>::iterator itTrack=point_computed_.begin( );
    while ( itTrack != point_computed_.end( ) )
    {
      tracks3D.push_back( ( cv::Vec3d )( *itTrack ) );
      colors.push_back( itTrack->getColor() );
      itTrack++;
    }

    //open a 3D viewer and add the point cloud:
    Visualizer debugView ( "Debug viewer" );
    if( coloredPoints )
      debugView.add3DPointsColored( tracks3D,colors, "Euclidean estimated" );
    else
      debugView.add3DPoints( tracks3D, "Euclidean estimated" );

    for( unsigned int i = 0; i<cameras_.size( ) ; ++i )
    //if( camera_computed_[i] )
    {
      std::stringstream cam_name;
      cam_name<<"Cam"<< ( i+1 );
      debugView.addCamera( cameras_[ i ],
        cam_name.str() );
    }

    debugView.runInteract( );
  }
}