GSoC2011SfM  0.1
Google Summer of Code 2011: Structure from motion
D:/Travail/These/Determination caracteristiques camera/GSoC/SfM/src/PointOfView.cpp
00001 
00002 #include "macro.h" //SFM_EXPORTS & remove annoying warnings
00003 
00004 #include "opencv2/highgui/highgui.hpp"
00005 #include "pcl/visualization/pcl_visualizer.h"
00006 #include <iostream>
00007 #include <pcl/io/vtk_io.h>
00008 #include "opencv2/core/eigen.hpp"
00009 #include "libmv/multiview/projection.h"
00010 
00011 #include "PointOfView.h"
00012 
00013 #include "TracksOfPoints.h"
00014 #include "PCL_mapping.h"
00015 #include "CameraPinholeDistor.h"
00016 
00017 using cv::Mat;
00018 using cv::Vec4d;
00019 using cv::Vec3d;
00020 using cv::Vec2d;
00021 using cv::Vec;
00022 using cv::Range;
00023 using cv::Ptr;
00024 using std::vector;
00025 using std::string;
00026 using cv::imread;
00027 using libmv::Mat34;
00028 using libmv::Mat3;
00029 using libmv::Vec3;
00030 
00031 namespace OpencvSfM{
00032 
  PointOfView::PointOfView( cv::Ptr<Camera> device,
    cv::Mat rotation /*=Mat::eye( 3, 3, CV_64F )*/,
    cv::Vec3d translation /*=Vec( 0.0,0.0,0.0 )*/ )
    : projection_matrix_( 3, 4, CV_64F )
  {
    CV_DbgAssert( rotation.rows==3 && rotation.cols==3 );
    CV_DbgAssert( !device.empty( ) );

    this->device_=device;

    //rotation_ is NOT an independent matrix: it is a 3x3 ROI header sharing
    //its data with the first three columns of projection_matrix_, so the
    //copyTo below writes the rotation directly into the projection matrix:
    this->rotation_=projection_matrix_( Range::all( ),Range( 0,3 ));
    rotation.copyTo( this->rotation_ );

    //same trick for translation_: a 3x1 view on the last column of projection_matrix_:
    this->translation_ = projection_matrix_( Range::all( ),Range( 3,4 ));
    Mat( translation ).copyTo( this->translation_ );

    this->config_=0;//everything should be estimated...

    //as we are a new point of view related to a device, we should add our address into device_:
    device_->pointsOfView_.push_back( this );
  };
00054 
  PointOfView::PointOfView( const PointOfView& ref )
  {
    //the camera device is shared between copies ( ref-counted cv::Ptr ):
    this->device_ = ref.device_;

    //deep-copy the projection matrix so both points of view can evolve independently:
    this->projection_matrix_ = ref.projection_matrix_.clone();

    //rebuild rotation_/translation_ as ROI headers into OUR cloned matrix
    //( they must not alias ref's data ):
    this->rotation_=projection_matrix_( Range::all( ),Range( 0,3 ));
    this->translation_ = projection_matrix_( Range::all( ),Range( 3,4 ));

    this->config_ = ref.config_;

    //NOTE(review): no user-defined copy assignment is visible; the compiler-
    //generated operator= would alias ref's matrix and skip this registration
    //step — confirm assignment is never used (Rule of Three).
    //as we are a new point of view (a copy of an existing one),
    //we should add our address into device_:
    device_->pointsOfView_.push_back( this );
  };
00070 
00071 
  PointOfView::PointOfView( cv::Mat projection_matrix )
    : projection_matrix_( 3, 4, CV_64F )
  {
    //convert to libmv matrix:
    libmv::Mat34 proj;
    cv::cv2eigen( projection_matrix, proj );
    //decompose P into intra parameters K, rotation R and translation t:
    libmv::Mat3 K, R;
    libmv::Vec3 t;
    libmv::KRt_From_P( proj, &K, &R, &t );

    //the decomposition is only defined up to sign: if R came out as an
    //improper rotation ( det < 0 ), flip the first row of R together with
    //the matching column of K and component of t so K*[R|t] is unchanged:
    if (R.determinant() < 0) {
      K.col(0) = -K.col(0);
      R.row(0) = -R.row(0);
      t(0)     = -t(0);
    }

    //enforce the rotation matrix: round-trip through a normalized quaternion
    //to remove numerical drift from the decomposition.
    //TODO find the closest rotation matrix...
    Eigen::Quaterniond tmp = (Eigen::Quaterniond)R;
    tmp.normalize();
    R = tmp.toRotationMatrix();

    //create the device: a pinhole camera built from the recovered K
    //( distortion can't be recovered from P alone ):
    cv::Mat K_cv;
    cv::eigen2cv( K, K_cv );
    device_ = cv::Ptr<Camera>( new CameraPinhole( K_cv ) );

    //store only [R|t] as our projection matrix; K lives in the device:
    proj<<R,t;
    cv::eigen2cv( proj, projection_matrix_ );

    //rotation_/translation_ are ROI headers sharing data with projection_matrix_:
    this->rotation_=projection_matrix_( Range::all( ),Range( 0,3 ));

    this->translation_ = projection_matrix_( Range::all( ),Range( 3,4 ));

    this->config_=0;//everything should be estimated...

    //as we are a new point of view related to a device, we should add our address into device_:
    device_->pointsOfView_.push_back( this );

  };
00112 
00113   PointOfView::~PointOfView( void )
00114   {
00115     this->projection_matrix_.release( );
00116 
00117     //remove the reference in device_->pointsOfView_:
00118     vector<PointOfView*>::iterator ourRef=device_->pointsOfView_.begin( );
00119     bool isFound=false;
00120     while( !isFound && ourRef != device_->pointsOfView_.end( ) )
00121     {
00122       //be aware of NULL pointor:
00123       if( ( *ourRef ) == this )
00124       {
00125         isFound=true;
00126         ( *ourRef )=NULL;
00127       }
00128       ourRef++;
00129     }
00130   }
00131 
00132   uchar PointOfView::getNbMissingParams( ) const
00133   {
00134     int nbParams = 0;
00135     if( (config_ & ROTATION_OK) == 0 )
00136       nbParams += 3;//quaternion's vector part...
00137     if( (config_ & TRANSLATION_OK) == 0 )
00138       nbParams += 3;//translation's vector part...
00139     return nbParams + device_->getNbMissingParams();
00140   }
00141 
00142   cv::Vec2d PointOfView::project3DPointIntoImage( cv::Vec3d point ) const
00143   {
00144     //As we don't know what type of camera we use ( with/without disportion, fisheyes... )
00145     //we can't use classic projection matrix P = K . [ R|t ]
00146     //Instead, we first compute points transformation into camera's system and then compute
00147     //pixel coordinate using camera device function.
00148 
00149     //As we need Mat object to compute projection, we create temporary objects:
00150     Mat mat3DNorm( 4,1,CV_64F );
00151     double* point3DNorm=( double* )mat3DNorm.data;
00152     Mat mat2DNorm( 3,1,CV_64F );
00153     double* point2DNorm=( double* )mat2DNorm.data;
00154 
00155     vector<Vec2d> pointsOut;
00156     point3DNorm[ 0 ] = point[ 0 ];
00157     point3DNorm[ 1 ] = point[ 1 ];
00158     point3DNorm[ 2 ] = point[ 2 ];
00159     point3DNorm[ 3 ] = 1;
00160 
00161     //transform points into camera's coordinates:
00162     mat2DNorm = ( projection_matrix_ * mat3DNorm );
00163     pointsOut.push_back( Vec2d( point2DNorm[ 0 ]/point2DNorm[ 2 ],point2DNorm[ 1 ]/point2DNorm[ 2 ] ));
00164 
00165     //transform points into pixel coordinates using camera intra parameters:
00166     pointsOut = device_->normImageToPixelCoordinates( pointsOut );
00167     Vec2d pointOut = pointsOut[ 0 ];
00168     return pointsOut[ 0 ];
00169   }
00170   std::vector< cv::Vec2d > PointOfView::project3DPointsIntoImage(
00171     std::vector< cv::Vec3d > points ) const
00172   {
00173     //As we don't know what type of camera we use ( with/without disportion, fisheyes... )
00174     //we can't use classic projection matrix P = K . [ R|t ]
00175     //Instead, we first compute points transformation into camera's system and then compute
00176     //pixel coordinate using camera device function.
00177 
00178     //As we need Mat object to compute projection, we create temporary objects:
00179     Mat mat3DNorm( 4,1,CV_64F );
00180     double* point3DNorm=( double* )mat3DNorm.data;
00181     Mat mat2DNorm( 3,1,CV_64F );
00182     double* point2DNorm=( double* )mat2DNorm.data;
00183 
00184     vector<Vec2d> pointsOut;
00185     vector<Vec3d>::iterator point=points.begin( );
00186     while( point!=points.end( ) )
00187     {
00188       point3DNorm[ 0 ] = ( *point )[ 0 ];
00189       point3DNorm[ 1 ] = ( *point )[ 1 ];
00190       point3DNorm[ 2 ] = ( *point )[ 2 ];
00191       point3DNorm[ 3 ] = 1;
00192 
00193       //transform points into camera's coordinates:
00194       mat2DNorm = ( projection_matrix_ * mat3DNorm );
00195 
00196       pointsOut.push_back( Vec2d( point2DNorm[ 0 ]/point2DNorm[ 2 ],point2DNorm[ 1 ]/point2DNorm[ 2 ] ));
00197 
00198       point++;
00199     }
00200     //transform points into pixel coordinates using camera intra parameters:
00201     return device_->normImageToPixelCoordinates( pointsOut );
00202   }
00203   std::vector< cv::Vec2d > PointOfView::project3DPointsIntoImage(
00204     std::vector<TrackOfPoints> points ) const
00205   {
00206     //As we don't know what type of camera we use ( with/without disportion, fisheyes... )
00207     //we can't use classic projection matrix P = K . [ R|t ]
00208     //Instead, we first compute points transformation into camera's system and then compute
00209     //pixel coordinate using camera device function.
00210 
00211     //As we need Mat object to compute projection, we create temporary objects:
00212     Mat mat3DNorm( 4,1,CV_64F );
00213     double* point3DNorm=( double* )mat3DNorm.data;
00214     Mat mat2DNorm( 3,1,CV_64F );
00215     double* point2DNorm=( double* )mat2DNorm.data;
00216 
00217     vector<Vec2d> pointsOut;
00218     vector<TrackOfPoints>::iterator point=points.begin( );
00219     while( point!=points.end( ) )
00220     {
00221       cv::Ptr<Vec3d> convert_from_track = point->get3DPosition();
00222       if( !convert_from_track.empty() )
00223       {
00224         point3DNorm[ 0 ] = (*convert_from_track)[ 0 ];
00225         point3DNorm[ 1 ] = (*convert_from_track)[ 1 ];
00226         point3DNorm[ 2 ] = (*convert_from_track)[ 2 ];
00227         point3DNorm[ 3 ] = 1;
00228 
00229         //transform points into camera's coordinates:
00230         mat2DNorm = ( projection_matrix_ * mat3DNorm );
00231 
00232         pointsOut.push_back( Vec2d( point2DNorm[ 0 ]/point2DNorm[ 2 ],point2DNorm[ 1 ]/point2DNorm[ 2 ] ));
00233       }
00234 
00235       point++;
00236     }
00237     //transform points into pixel coordinates using camera intra parameters:
00238     return device_->normImageToPixelCoordinates( pointsOut );
00239   }
00240 
00241   bool PointOfView::pointInFrontOfCamera( cv::Vec4d point ) const
00242   {
00243     Mat pointTranspose= ( Mat ) point;
00244     double condition_1 =
00245       this->projection_matrix_.row( 2 ).dot( pointTranspose.t( ) ) * point[ 3 ];
00246     double condition_2 = point[ 2 ] * point[ 3 ];
00247     if( condition_1 > 0 && condition_2 > 0 )
00248       return true;
00249     else
00250       return false;
00251   }
00252   cv::Mat PointOfView::getProjectionMatrix( ) const
00253   {
00254     return device_->getIntraMatrix( ) * projection_matrix_;
00255   };
00256 
00257 
00258   cv::Ptr<PointOfView> PointOfView::read( const cv::FileNode& node )
00259   {
00260     std::string myName=node.name( );
00261     if( myName != "PointOfView" )
00262     {
00263       std::string error = "FileNode is not correct!\nExpected \"PointOfView\", got ";
00264       error += node.name();
00265       CV_Error( CV_StsError, error.c_str() );
00266     }
00267     cv::FileNodeIterator it = node.begin();
00268     cv::Mat rot,trans;
00269     cv::Ptr<Camera> device;
00270 
00271     while( it != node.end() )
00272     {
00273       if((*it).name()=="rotation_")
00274         (*it)>>rot;
00275       if((*it).name()=="translation_")
00276         (*it)>>trans;
00277       if((*it).name()=="CameraPinhole")
00278         device = CameraPinhole::read( *it );
00279       if((*it).name()=="CameraPinholeDistor")
00280         device = CameraPinholeDistor::read( *it );
00281       it++;
00282     }
00283     return cv::Ptr<PointOfView>( new PointOfView(device, rot, trans) );
00284   }
00285 
00286   void PointOfView::write( cv::FileStorage& fs, const PointOfView& pov )
00287   {
00288     fs <<"{" << "PointOfView" <<"{" << "rotation_" << pov.rotation_;
00289     fs << "translation_" << pov.translation_;
00290     pov.device_->write( fs );
00291     fs << "}" << "}";
00292   }
00293   
  void PointOfView::print( std::ostream &flux ) const
  {
    //Dump the camera intra parameters and the current pose ( rotation matrix
    //and translation vector ) to the given stream; used by operator<<.
    //NOTE(review): 'endl' is unqualified here — presumably brought into scope
    //by an included header; confirm, or qualify as std::endl.
    flux << "Intra parameters: " << endl << device_->getIntraMatrix()<<endl;
    flux << "Rotation matrix: " << endl << rotation_<<endl;
    flux << "Translation vector: " << endl << translation_<<endl;
  }
00300 
00301 }
00302 
std::ostream& operator<<( std::ostream &flux, OpencvSfM::PointOfView const & pov )
{
  //stream insertion for PointOfView: delegates to the class' print method.
  pov.print(flux);
  return flux;
}
 All Classes Functions Variables