Example #1
void computeSensorOffsetAndK(Eigen::Isometry3f &sensorOffset, Eigen::Matrix3f &cameraMatrix, ParameterCamera *cameraParam, int reduction) {
  sensorOffset = Isometry3f::Identity();
  cameraMatrix.setZero();

  // Copy the 3x4 offset and the 3x3 intrinsics from the camera parameter.
  int cmax = 4;
  int rmax = 3;
  for (int c = 0; c < cmax; c++) {
    for (int r = 0; r < rmax; r++) {
      sensorOffset.matrix()(r, c) = cameraParam->offset()(r, c);
      if (c < 3)
        cameraMatrix(r, c) = cameraParam->Kcam()(r, c);
    }
  }
  // Override the offset read from the parameter with a fixed sensor pose.
  sensorOffset.translation() = Vector3f(0.15f, 0.0f, 0.05f);
  Quaternionf quat = Quaternionf(0.5f, -0.5f, 0.5f, -0.5f);  // (w, x, y, z)
  sensorOffset.linear() = quat.toRotationMatrix();
  sensorOffset.matrix().block<1, 4>(3, 0) << 0.0f, 0.0f, 0.0f, 1.0f;

  // Rescale the intrinsics for the reduced image size; the homogeneous
  // entry (2,2) must remain 1.
  float scale = 1.0f / reduction;
  cameraMatrix *= scale;
  cameraMatrix(2, 2) = 1;
}
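
A minimal stand-alone sketch of the intrinsics rescaling performed above, assuming a typical pinhole matrix (the numeric values are illustrative): shrinking the depth image by a factor of reduction scales fx, fy, cx and cy by the same factor, while the homogeneous entry K(2,2) must stay 1.

#include <Eigen/Core>
#include <iostream>

int main() {
  Eigen::Matrix3f K;
  K << 525.0f,   0.0f, 319.5f,   // fx,  0, cx
         0.0f, 525.0f, 239.5f,   //  0, fy, cy
         0.0f,   0.0f,   1.0f;
  int reduction = 2;             // image shrunk by a factor of 2
  K *= 1.0f / reduction;
  K(2, 2) = 1.0f;                // restore the homogeneous entry
  std::cout << K << std::endl;   // fx becomes 262.5, cx becomes 159.75, ...
  return 0;
}

Example #2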
  bool extractRelativePrior(Eigen::Isometry3f& priorMean, Matrix6f& priorInfo,
                            const DrawableFrame* reference, const DrawableFrame* current) {
    VertexSE3* referenceVertex = reference->_vertex;
    VertexSE3* currentVertex = current->_vertex;
    bool priorFound = false;
    priorInfo.setZero();
    // Scan the edges leaving the reference vertex for an SE3 edge that
    // connects it to the current vertex.
    for (HyperGraph::EdgeSet::const_iterator it = referenceVertex->edges().begin();
         it != referenceVertex->edges().end(); it++) {
      const EdgeSE3* e = dynamic_cast<const EdgeSE3*>(*it);
      if (!e)  // skip edges that are not SE3 edges
        continue;
      if (e->vertex(0) == referenceVertex && e->vertex(1) == currentVertex) {
        priorFound = true;
        for (int c = 0; c < 6; c++)
          for (int r = 0; r < 6; r++)
            priorInfo(r, c) = e->information()(r, c);

        for (int c = 0; c < 4; c++)
          for (int r = 0; r < 3; r++)
            priorMean.matrix()(r, c) = e->measurement().matrix()(r, c);
        priorMean.matrix().row(3) << 0, 0, 0, 1;
      }
    }
    return priorFound;
  }
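
A hypothetical usage sketch, assuming an aligner exposing the addRelativePrior method that appears later in this listing: the SE3 edge extracted above becomes a relative prior for the alignment.

Eigen::Isometry3f priorMean;
Matrix6f priorInfo;
if (extractRelativePrior(priorMean, priorInfo, referenceFrame, currentFrame))
  aligner->addRelativePrior(priorMean, priorInfo);

Example #3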
 void PinholePointProjector::_updateMatrices() {
   Eigen::Isometry3f t = _transform.inverse();
   t.matrix().block<1, 4>(3, 0) << 0.0f, 0.0f, 0.0f, 1.0f;
   // Cache the products needed to project (K R, K t) and to unproject
   // (R K^-1, t) so the per-point operations stay cheap.
   _iK = _cameraMatrix.inverse();
   _KR = _cameraMatrix * t.linear();
   _Kt = _cameraMatrix * t.translation();
   _iKR = _transform.linear() * _iK;
   _iKt = _transform.translation();
   _KRt.setIdentity();
   _iKRt.setIdentity();
   _KRt.block<3, 3>(0, 0) = _KR;
   _KRt.block<3, 1>(0, 3) = _Kt;
   _iKRt.block<3, 3>(0, 0) = _iKR;
   _iKRt.block<3, 1>(0, 3) = _iKt;
 }
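
A sketch of how the cached products are typically used; the helper name is hypothetical, with KR and Kt standing for _KR and _Kt above. Thanks to the precomputation, projecting a point costs a single matrix-vector product followed by a perspective division.

Eigen::Vector3f projectPoint(const Eigen::Matrix3f &KR,
                             const Eigen::Vector3f &Kt,
                             const Eigen::Vector3f &p) {
  Eigen::Vector3f ip = KR * p + Kt;  // point in homogeneous image coordinates
  float iz = 1.0f / ip.z();
  return Eigen::Vector3f(ip.x() * iz, ip.y() * iz, ip.z());  // (u, v, depth)
}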
Example #4
  void Aligner::_computeStatistics(Vector6f &mean, Matrix6f &Omega,
                                   float &translationalRatio, float &rotationalRatio) const {
    typedef SigmaPoint<Vector6f> SigmaPoint;
    typedef std::vector<SigmaPoint, Eigen::aligned_allocator<SigmaPoint> > SigmaPointVector;
  
    // Output init
    Vector6f b;
    Matrix6f H;
    b.setZero();
    H.setZero();
    translationalRatio = std::numeric_limits<float>::max();
    rotationalRatio = std::numeric_limits<float>::max();

    Eigen::Isometry3f invT = _T.inverse();
    invT.matrix().block<1, 4>(3, 0) << 0.0f, 0.0f, 0.0f, 1.0f;
    _linearizer->setT(invT);
    _linearizer->update();
    H += _linearizer->H() + Matrix6f::Identity();
    b += _linearizer->b();

    JacobiSVD<Matrix6f> svd(H, Eigen::ComputeThinU | Eigen::ComputeThinV);
    Matrix6f localSigma = svd.solve(Matrix6f::Identity());
    SigmaPointVector sigmaPoints;
    Vector6f localMean = Vector6f::Zero();
    sampleUnscented(sigmaPoints, localMean, localSigma);
  
    Eigen::Isometry3f dT = _T;  // Transform from current to reference
    
    // Remap each of the sigma points to their original position
    //#pragma omp parallel 
    for (size_t i = 0; i < sigmaPoints.size(); i++) {
      SigmaPoint &p = sigmaPoints[i];
      p._sample = t2v(dT * v2t(p._sample).inverse());
    }
    // Reconstruct the gaussian 
    reconstructGaussian(mean, localSigma, sigmaPoints);

    // Compute the information matrix from the covariance
    Omega = localSigma.inverse();
  
    // Inspect the SVD of the translational and rotational blocks to see how well each is constrained.
    JacobiSVD<Matrix3f> partialSVD;
    partialSVD.compute(Omega.block<3, 3>(0, 0));
    translationalRatio = partialSVD.singularValues()(0) / partialSVD.singularValues()(2);
    
    partialSVD.compute(Omega.block<3, 3>(3, 3));
    rotationalRatio = partialSVD.singularValues()(0) / partialSVD.singularValues()(2);
  }
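
The ratio computed above is the spread of the singular values of a 3x3 information block: a value close to 1 means all directions are about equally constrained, a large value flags a poorly constrained direction. A minimal sketch of the same measurement, assuming only Eigen:

#include <Eigen/Dense>

float eigenRatio(const Eigen::Matrix3f &infoBlock) {
  Eigen::JacobiSVD<Eigen::Matrix3f> svd(infoBlock);  // singular values only
  return svd.singularValues()(0) / svd.singularValues()(2);
}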
Example #5
void Aligner::align() {
  struct timeval tvStart, tvEnd;
  gettimeofday(&tvStart,0);

  if (!_projector || !_linearizer || !_correspondenceGenerator) {
    cerr << "Aligner: doing nothing, a projector, linearizer and correspondence generator must all be set" << endl;
    return;
  }
  // the current points are seen from the frame of the sensor
  _projector->setTransform(_sensorOffset);
  _projector->project(_correspondenceGenerator->currentIndexImage(),
                      _correspondenceGenerator->currentDepthImage(),
                      _currentScene->points());
  _T = _initialGuess;
  for(int i = 0; i < _outerIterations; i++) {
    //cout << "********************* Iteration " << i << " *********************" << endl;
    
    /************************************************************************
     *                         Correspondence Computation                   *
     ************************************************************************/
    //cout << "Computing correspondences...";
    
    // compute the indices of the current scene from the point of view of the sensor
    _projector->setTransform(_T*_sensorOffset);
    _projector->project(_correspondenceGenerator->referenceIndexImage(),
                        _correspondenceGenerator->referenceDepthImage(),
                        _referenceScene->points());
    
    // Correspondences computation.    
    _correspondenceGenerator->compute(*_referenceScene, *_currentScene, _T.inverse());

    //cout << " done." << endl;
    //cout << "# inliers found: " << _correspondenceGenerator->numCorrespondences() << endl;

    /************************************************************************
     *                            Alignment                                 *
     ************************************************************************/

    Eigen::Isometry3f invT = _T.inverse();
    for (int k = 0; k < _innerIterations; k++) {
      invT.matrix().block<1, 4>(3, 0) << 0, 0, 0, 1;
      Matrix6f H;
      Vector6f b;

      _linearizer->setT(invT);
      _linearizer->update();
      // Damp the system to keep the increment small.
      H = _linearizer->H() + Matrix6f::Identity() * 10.0f;
      b = _linearizer->b();

      // Add the priors: each contributes J^T Omega J to H and J^T Omega e to b.
      for (size_t i = 0; i < _priors.size(); i++) {
        const SE3Prior* prior = _priors[i];
        Vector6f priorError = prior->error(invT);
        Matrix6f priorJacobian = prior->jacobian(invT);
        Matrix6f priorInformationRemapped = prior->errorInformation(invT);

        Matrix6f Hp = priorJacobian.transpose() * priorInformationRemapped * priorJacobian;
        Vector6f bp = priorJacobian.transpose() * priorInformationRemapped * priorError;

        H += Hp;
        b += bp;
      }

      // Solve the damped system and apply the increment on the left.
      Vector6f dx = H.ldlt().solve(-b);
      Eigen::Isometry3f dT = v2t(dx);
      invT = dT * invT;
    }
    _T = invT.inverse();
    _T.matrix().block<1, 4>(3, 0) << 0, 0, 0, 1;
  }

  gettimeofday(&tvEnd, 0);
  double tStart = tvStart.tv_sec*1000+tvStart.tv_usec*0.001;
  double tEnd = tvEnd.tv_sec*1000+tvEnd.tv_usec*0.001;
  _totalTime = tEnd - tStart;
  _error = _linearizer->error();
  _inliers = _linearizer->inliers();
}
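Example #6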
  void ViewerState::load(const std::string& filename){
    clear();
    listWidget->clear();
    graph->clear();
    graph->load(filename.c_str());

    vector<int> vertexIds(graph->vertices().size());
    int k=0;
    for (OptimizableGraph::VertexIDMap::iterator it = graph->vertices().begin(); it != graph->vertices().end(); ++it) {
      vertexIds[k++] = (it->first);
    }

    sort(vertexIds.begin(), vertexIds.end());

    listAssociations.clear();
    size_t maxCount = 20000;
    for(size_t i = 0; i < vertexIds.size() && i < maxCount; ++i) {
      OptimizableGraph::Vertex* _v = graph->vertex(vertexIds[i]);
      g2o::VertexSE3* v = dynamic_cast<g2o::VertexSE3*>(_v);
      if (!v)
        continue;
      OptimizableGraph::Data* d = v->userData();
      while (d) {
        RGBDData* rgbdData = dynamic_cast<RGBDData*>(d);
        if (!rgbdData) {
          d = d->next();
          continue;
        }
        // retrieve from the rgbd data the index of the parameter
        int paramIndex = rgbdData->paramIndex();
        // retrieve from the graph the parameter given the index
        g2o::Parameter* _cameraParam = graph->parameter(paramIndex);
        // attempt a cast to a camera parameter
        ParameterCamera* cameraParam = dynamic_cast<ParameterCamera*>(_cameraParam);
        if (!cameraParam) {
          cerr << "parameter " << paramIndex << " is not a ParameterCamera, aborting" << endl;
          return;
        }
        // we got the camera parameter; extract the intrinsics and the offset
        Eigen::Matrix3f cameraMatrix;
        Eigen::Isometry3f sensorOffset = Eigen::Isometry3f::Identity();
        cameraMatrix.setZero();

        int cmax = 4;
        int rmax = 3;
        for (int c = 0; c < cmax; c++) {
          for (int r = 0; r < rmax; r++) {
            sensorOffset.matrix()(r, c) = cameraParam->offset()(r, c);
            if (c < 3)
              cameraMatrix(r, c) = cameraParam->Kcam()(r, c);
          }
        }
        char buf[1024];
        sprintf(buf, "%d", v->id());
        QString listItem(buf);
        listAssociations.push_back(rgbdData);
        listWidget->addItem(listItem);
        d = d->next();
      }
    }

  }
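Example #7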
int main (int argc, char** argv) {
    // Handle input
    float pointSize;
    float pointStep;
    float alpha;
    int applyTransform;
    int step;
    string logFilename;
    string configFilename;
    float di_scaleFactor;
    float scale;

    g2o::CommandArgs arg;
    arg.param("vz_pointSize", pointSize, 1.0f, "Size of the visualized points");
    arg.param("vz_transform", applyTransform, 1, "If set, apply the absolute transform to the point clouds");
    arg.param("vz_step", step, 1, "Visualize a point cloud each vz_step point clouds");
    arg.param("vz_alpha", alpha, 1.0f, "Alpha channel value used for the colored points");
    arg.param("vz_pointStep", pointStep, 1, "Step at which points are drawn");
    arg.param("vz_scale", scale, 2.0f, "Depth image size reduction factor");
    arg.param("di_scaleFactor", di_scaleFactor, 0.001f, "Scale factor used to convert depth image values to meters");
    arg.paramLeftOver("configFilename", configFilename, "", "Configuration filename", true);
    arg.paramLeftOver("logFilename", logFilename, "", "Log filename", true);
    arg.parseArgs(argc, argv);

    // Create GUI
    QApplication application(argc,argv);
    QWidget *mainWindow = new QWidget();
    mainWindow->setWindowTitle("pwn_tracker_log_viewer");
    QHBoxLayout *hlayout = new QHBoxLayout();
    mainWindow->setLayout(hlayout);
    QVBoxLayout *vlayout = new QVBoxLayout();
    hlayout->addItem(vlayout);
    QVBoxLayout *vlayout2 = new QVBoxLayout();
    hlayout->addItem(vlayout2);
    hlayout->setStretch(1, 1);

    QListWidget* listWidget = new QListWidget(mainWindow);
    listWidget->setSelectionMode(QAbstractItemView::MultiSelection);
    vlayout->addWidget(listWidget);
    PWNQGLViewer* viewer = new PWNQGLViewer(mainWindow);
    vlayout2->addWidget(viewer);
    Eigen::Isometry3f T;
    T.setIdentity();
    T.matrix().row(3) << 0.0f, 0.0f, 0.0f, 1.0f;

    // Read config file
    cout << "Loading config file..." << endl;
    Aligner *aligner;
    DepthImageConverter *converter;
    std::vector<Serializable*> instances = readConfig(aligner, converter, configFilename.c_str());
    converter->projector()->scale(1.0f/scale);
    cout << "... done" << endl;

    // Read and parse log file
    std::vector<boss::Serializable*> objects;
    std::vector<PwnTrackerFrame*> trackerFrames;
    std::vector<PwnTrackerRelation*> trackerRelations;
    Deserializer des;
    des.setFilePath(logFilename);
    cout << "Loading log file..." << endl;
    load(trackerFrames, trackerRelations, objects, des, step);
    cout << "... done" << endl;

    // Load the drawable list with the PwnTrackerFrame objects
    std::vector<Frame*> pointClouds;
    pointClouds.resize(trackerFrames.size());
    Frame *dummyFrame = 0;
    std::fill(pointClouds.begin(), pointClouds.end(), dummyFrame);
    for(size_t i = 0; i < trackerFrames.size(); i++) {
        PwnTrackerFrame *pwnTrackerFrame = trackerFrames[i];
        char nummero[1024];
        sprintf(nummero, "%05d", (int)i);
        listWidget->addItem(QString(nummero));
        QListWidgetItem *lastItem = listWidget->item(listWidget->count() - 1);

        Isometry3f transform = Isometry3f::Identity();
        if(applyTransform) {
            isometry3d2f(transform, pwnTrackerFrame->transform());
            transform = transform*pwnTrackerFrame->sensorOffset;
        }
        transform.matrix().row(3) << 0.0f, 0.0f, 0.0f, 1.0f;

        GLParameterFrame *frameParams = new GLParameterFrame();
        frameParams->setStep(pointStep);
        frameParams->setShow(false);
        DrawableFrame* drawableFrame = new DrawableFrame(transform, frameParams, pointClouds[i]);
        viewer->addDrawable(drawableFrame);
    }

    // Manage GUI
    viewer->init();
    mainWindow->show();
    viewer->show();
    listWidget->show();
    viewer->setAxisIsDrawn(true);
    bool selectionChanged = false;
    QListWidgetItem *item = 0;
    DepthImage depthImage;
    DepthImage scaledDepthImage;
    while (mainWindow->isVisible()) {
        selectionChanged = false;
        for (int i = 0; i<listWidget->count(); i++) {
            item = 0;
            item = listWidget->item(i);
            DrawableFrame *drawableFrame = dynamic_cast<DrawableFrame*>(viewer->drawableList()[i]);
            if (!drawableFrame)
                continue;
            if (item && item->isSelected()) {
                if(!drawableFrame->parameter()->show()) {
                    drawableFrame->parameter()->setShow(true);
                    selectionChanged = true;
                }
                if(pointClouds[i] == 0) {
                    pointClouds[i] = new Frame();
                    boss_map::ImageBLOB* fromDepthBlob = trackerFrames[i]->depthImage.get();
                    // reuse the outer depthImage instead of shadowing it
                    depthImage.fromCvMat(fromDepthBlob->cvImage());
                    DepthImage::scale(scaledDepthImage, depthImage, scale);
                    converter->compute(*pointClouds[i], scaledDepthImage);
                    drawableFrame->setFrame(pointClouds[i]);
                    delete fromDepthBlob;
                }
            } else {
                drawableFrame->parameter()->setShow(false);
                selectionChanged = true;
            }
        }
        if (selectionChanged)
            viewer->updateGL();

        application.processEvents();
    }

    return 0;
}
Example #8
  void Merger::merge(Cloud *cloud, Eigen::Isometry3f transform) {
    assert(_indexImage.rows > 0 && _indexImage.cols > 0 && "Merger: _indexImage has zero size");  
    assert(_depthImageConverter  && "Merger: missing _depthImageConverter");  
    assert(_depthImageConverter->projector()  && "Merger: missing projector in _depthImageConverter");  

    PointProjector *pointProjector = _depthImageConverter->projector();
    Eigen::Isometry3f oldTransform = pointProjector->transform();
    
    pointProjector->setTransform(transform);
    pointProjector->project(_indexImage,
                            _depthImage,
                            cloud->points());

    // Counters: `distance` counts points outside the image or the depth
    // range, `target` counts points landing on an invalid index.
    int target = 0;
    int distance = 0;
    _collapsedIndices.resize(cloud->points().size());
    std::fill(_collapsedIndices.begin(), _collapsedIndices.end(), -1);

    int killed = 0;
    int currentIndex = 0;
    for (size_t i = 0; i < cloud->points().size(); currentIndex++, i++) {
      const Point currentPoint = cloud->points()[i];
      // const Normal currentNormal = cloud->normals()[i];

      // Project the point back into the image.
      int r = -1, c = -1;
      float depth = 0.0f;
      pointProjector->project(c, r, depth, currentPoint);
      if (depth < 0 || depth > _maxPointDepth ||
          r < 0 || r >= _depthImage.rows ||
          c < 0 || c >= _depthImage.cols) {
        distance++;
        continue;
      }

      float &targetZ = _depthImage(r, c);
      int targetIndex = _indexImage(r, c);
      if (targetIndex < 0) {
        target++;
        continue;
      }
      // const Normal &targetNormal = cloud->normals().at(targetIndex);

      Eigen::Vector4f viewPointDirection = transform.matrix().col(3) - currentPoint;
      viewPointDirection(3) = 0;
      if (targetIndex == currentIndex) {
        _collapsedIndices[currentIndex] = currentIndex;
      }
      else if (fabs(depth - targetZ) < _distanceThreshold /*&&
               currentNormal.dot(targetNormal) > _normalThreshold &&
               (viewPointDirection.dot(targetNormal) > cos(0))*/ ) {
        // Merge the two measurements and mark the current point as collapsed.
        Gaussian3f &targetGaussian = cloud->gaussians()[targetIndex];
        Gaussian3f &currentGaussian = cloud->gaussians()[currentIndex];
        targetGaussian.addInformation(currentGaussian);
        _collapsedIndices[currentIndex] = targetIndex;
        killed++;
      }
    }

    int murdered = 0;
    int k = 0;
    for (size_t i = 0; i < _collapsedIndices.size(); i++) {
      int collapsedIndex = _collapsedIndices[i];
      if (collapsedIndex == (int)i) {
        cloud->points()[i].head<3>() = cloud->gaussians()[i].mean();
      }
      // Keep surviving points by copying them forward to index k.
      if (collapsedIndex < 0 || collapsedIndex == (int)i) {
        cloud->points()[k] = cloud->points()[i];
        cloud->normals()[k] = cloud->normals()[i];
        cloud->stats()[k] = cloud->stats()[i];
        cloud->pointInformationMatrix()[k] = cloud->pointInformationMatrix()[i];
        cloud->normalInformationMatrix()[k] = cloud->normalInformationMatrix()[i];
        cloud->gaussians()[k] = cloud->gaussians()[i];
        if (cloud->rgbs().size())
          cloud->rgbs()[k] = cloud->rgbs()[i];
        k++;
      }
      else {
        murdered++;
      }
    }
    int originalSize = cloud->points().size();
    
    // Kill the leftover points
    cloud->points().resize(k);
    cloud->normals().resize(k);
    cloud->stats().resize(k);
    cloud->pointInformationMatrix().resize(k);
    cloud->normalInformationMatrix().resize(k);
    if(cloud->rgbs().size())
      cloud->rgbs().resize(k);
    std::cout << "[INFO]: number of suppressed points " << murdered << std::endl;
    std::cout << "[INFO]: resized cloud from " << originalSize << " to " << k << " points" <<std::endl;
    
    pointProjector->setTransform(oldTransform);
  }
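
The survivor loop above is the classic stable in-place compaction idiom; a minimal stand-alone sketch of the same pattern on a single array:

#include <vector>

// Copy the elements to keep toward the front, then shrink; stable and O(n).
size_t compact(std::vector<int> &values, const std::vector<bool> &keep) {
  size_t k = 0;
  for (size_t i = 0; i < values.size(); ++i)
    if (keep[i])
      values[k++] = values[i];
  values.resize(k);
  return k;
}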
Example #9
  void Aligner::align() {
    assert(_projector && "Aligner: missing _projector");
    assert(_linearizer && "Aligner: missing _linearizer");
    assert(_correspondenceFinder && "Aligner: missing _correspondenceFinder");
    assert(_referenceCloud && "Aligner: missing _referenceCloud");
    assert(_currentCloud && "Aligner: missing _currentCloud");

    struct timeval tvStart, tvEnd;
    gettimeofday(&tvStart, 0);

    // The current points are seen from the frame of the sensor
    _projector->setTransform(_currentSensorOffset);
    _projector->project(_correspondenceFinder->currentIndexImage(),
                        _correspondenceFinder->currentDepthImage(),
                        _currentCloud->points());
    _T = _initialGuess;
        
    for(int i = 0; i < _outerIterations; i++) {
      /************************************************************************
       *                         Correspondence Computation                   *
       ************************************************************************/

      // Compute the indices of the current scene from the point of view of the sensor
      _T.matrix().row(3) << 0.0f, 0.0f, 0.0f, 1.0f;
      _projector->setTransform(_T * _referenceSensorOffset);
      _projector->project(_correspondenceFinder->referenceIndexImage(),
                          _correspondenceFinder->referenceDepthImage(),
                          _referenceCloud->points());
    
      // Correspondences computation.  
      _correspondenceFinder->compute(*_referenceCloud, *_currentCloud, _T.inverse());
 
      /************************************************************************
       *                            Alignment                                 *
       ************************************************************************/
      Eigen::Isometry3f invT = _T.inverse();
      for (int k = 0; k < _innerIterations; k++) {
        invT.matrix().block<1, 4>(3, 0) << 0.0f, 0.0f, 0.0f, 1.0f;
        Matrix6f H;
        Vector6f b;

        _linearizer->setT(invT);
        _linearizer->update();
        H = _linearizer->H() + Matrix6f::Identity();
        b = _linearizer->b();
        // Heavy damping to keep each increment small.
        H += Matrix6f::Identity() * 1000.0f;

        // Add the priors
        for (size_t j = 0; j < _priors.size(); j++) {
          const SE3Prior *prior = _priors[j];
          Vector6f priorError = prior->error(invT);
          Matrix6f priorJacobian = prior->jacobian(invT);
          Matrix6f priorInformationRemapped = prior->errorInformation(invT);

          Matrix6f Hp = priorJacobian.transpose() * priorInformationRemapped * priorJacobian;
          Vector6f bp = priorJacobian.transpose() * priorInformationRemapped * priorError;

          H += Hp;
          b += bp;
        }

        Vector6f dx = H.ldlt().solve(-b);
        Eigen::Isometry3f dT = v2t(dx);
        invT = dT * invT;
      }

      _T = invT.inverse();
      // Round-trip through the minimal parametrization to re-orthonormalize.
      _T = v2t(t2v(_T));
      _T.matrix().block<1, 4>(3, 0) << 0.0f, 0.0f, 0.0f, 1.0f;
    }

    gettimeofday(&tvEnd, 0);
    double tStart = tvStart.tv_sec * 1000.0 + tvStart.tv_usec * 0.001;
    double tEnd = tvEnd.tv_sec * 1000.0 + tvEnd.tv_usec * 0.001;
    _totalTime = tEnd - tStart;
    _error = _linearizer->error();
    _inliers = _linearizer->inliers();

    _computeStatistics(_mean, _omega, _translationalEigenRatio, _rotationalEigenRatio);
    if (_rotationalEigenRatio > _rotationalMinEigenRatio ||
        _translationalEigenRatio > _translationalMinEigenRatio) {
      if (_debug) {
        cerr << endl;
        cerr << "************** WARNING: SOLUTION MIGHT BE INVALID (eigenratio failure) *************" << endl;
        cerr << "tr: " << _translationalEigenRatio << " rr: " << _rotationalEigenRatio << endl;
        cerr << "************************************************************************************" << endl;
      }
    }
    else {
      if (_debug) {
        cerr << "************** SOLUTION VALID (eigenratio ok) **************************************" << endl;
        cerr << "tr: " << _translationalEigenRatio << " rr: " << _rotationalEigenRatio << endl;
        cerr << "************************************************************************************" << endl;
      }
    }
    if (_debug) {
      cout << "Solution statistics in (t, mq): " << endl;
      cout << "mean: " << _mean.transpose() << endl;
      cout << "Omega: " << endl;
      cout << _omega << endl;
    }
  }
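Example #10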
  void TwoDepthImageAlignerNode::processNode(MapNode* node_) {
    cerr << "START ITERATION" << endl;
    std::vector<Serializable*> createdObjects;
    SensingFrameNode* sensingFrame = dynamic_cast<SensingFrameNode*>(node_);
    if (! sensingFrame)
      return;
    
    PinholeImageData* image = dynamic_cast<PinholeImageData*>(sensingFrame->sensorData(_topic));
    if (! image)
      return;
    cerr << "got image"  << endl;
    
    Eigen::Isometry3d _sensorOffset = _config->sensorOffset(image->baseSensor());
    
    // cerr << "sensorOffset: " << endl;
    // cerr << _sensorOffset.matrix() << endl;

    Eigen::Isometry3f sensorOffset;
    convertScalar(sensorOffset,_sensorOffset);
    sensorOffset.matrix().row(3) << 0,0,0,1;

    Eigen::Matrix3d _cameraMatrix = image->cameraMatrix();
    
    ImageBLOB* blob = image->imageBlob().get();
    
    DepthImage depthImage;
    depthImage.fromCvMat(blob->cvImage());
    int r=depthImage.rows();
    int c=depthImage.cols();
    
    DepthImage scaledImage;
    Eigen::Matrix3f cameraMatrix;
    convertScalar(cameraMatrix,_cameraMatrix);
    
    //computeScaledParameters(r,c,cameraMatrix,_scale);
    PinholePointProjector* projector = dynamic_cast<PinholePointProjector*>(_converter->projector());
    assert(projector && "TwoDepthImageAlignerNode: the converter must use a PinholePointProjector");
    cameraMatrix(2, 2) = 1;
    projector->setCameraMatrix(cameraMatrix);
    projector->setImageSize(depthImage.rows(), depthImage.cols());
    projector->scale(1.0 / _scale);

    DepthImage::scale(scaledImage,depthImage,_scale);
    pwn::Frame* frame = new pwn::Frame;
    _converter->compute(*frame,scaledImage, sensorOffset);
  
    MapNodeBinaryRelation* odom=0;

    std::vector<MapNode*> oneNode(1);
    oneNode[0]=sensingFrame;
    MapNodeUnaryRelation* imu = extractRelation<MapNodeUnaryRelation>(oneNode);
    
    if (_previousFrame){
      _aligner->setReferenceSensorOffset(_aligner->currentSensorOffset());
      _aligner->setCurrentSensorOffset(sensorOffset);
      _aligner->setReferenceFrame(_previousFrame);
      _aligner->setCurrentFrame(frame);
      
      PinholePointProjector* projector=(PinholePointProjector*)(_aligner->projector());
      projector->setCameraMatrix(cameraMatrix);
      projector->setImageSize(depthImage.rows(), depthImage.cols());
      projector->scale(1.0/_scale);
      _aligner->correspondenceFinder()->setImageSize(projector->imageRows(), projector->imageCols());

      /*
	cerr << "correspondenceFinder: "  << r << " " << c << endl; 
	cerr << "sensorOffset" << endl;
	cerr <<_aligner->currentSensorOffset().matrix() << endl;
	cerr <<_aligner->referenceSensorOffset().matrix() << endl;
	cerr << "cameraMatrix" << endl;
	cerr << projector->cameraMatrix() << endl;
      */

      std::vector<MapNode*> twoNodes(2);
      twoNodes[0]=_previousSensingFrameNode;
      twoNodes[1]=sensingFrame;
      odom = extractRelation<MapNodeBinaryRelation>(twoNodes);
      cerr << "odom:" << odom << " imu:" << imu << endl;

      Eigen::Isometry3f guess = Eigen::Isometry3f::Identity();
      _aligner->clearPriors();
      if (odom) {
        Eigen::Isometry3f mean;
        Eigen::Matrix<float, 6, 6> info;
        convertScalar(mean, odom->transform());
        mean.matrix().row(3) << 0, 0, 0, 1;
        convertScalar(info, odom->informationMatrix());
        //cerr << "odom: " << t2v(mean).transpose() << endl;
        _aligner->addRelativePrior(mean, info);
        //guess = mean;
      }

      if (imu) {
        Eigen::Isometry3f mean;
        Eigen::Matrix<float, 6, 6> info;
        convertScalar(mean, imu->transform());
        convertScalar(info, imu->informationMatrix());
        mean.matrix().row(3) << 0, 0, 0, 1;
        //cerr << "imu: " << t2v(mean).transpose() << endl;
        _aligner->addAbsolutePrior(_globalT, mean, info);
      }
      _aligner->setInitialGuess(guess);
      cerr << "Frames: " << _previousFrame << " " << frame << endl;

      
      // projector->setCameraMatrix(cameraMatrix);
      // projector->setTransform(Eigen::Isometry3f::Identity());
      // Eigen::MatrixXi debugIndices(r,c);
      // DepthImage debugImage(r,c);
      // projector->project(debugIndices, debugImage, frame->points());

      _aligner->align();
      
      // sprintf(buf, "img-dbg-%05d.pgm",j);
      // debugImage.save(buf);
      //sprintf(buf, "img-ref-%05d.pgm",j);
      //_aligner->correspondenceFinder()->referenceDepthImage().save(buf);
      //sprintf(buf, "img-cur-%05d.pgm",j);
      //_aligner->correspondenceFinder()->currentDepthImage().save(buf);

      cerr << "inliers: " << _aligner->inliers() << endl;
      cerr << "chi2: " << _aligner->error() << endl;
      cerr << "chi2/inliers: " << _aligner->error()/_aligner->inliers() << endl;
      cerr << "initialGuess: " << t2v(guess).transpose() << endl;
      cerr << "transform   : " << t2v(_aligner->T()).transpose() << endl;
      // Note: inliers() > -1 is always true, so the result is always accepted.
      if (_aligner->inliers() > -1) {
        _globalT = _globalT * _aligner->T();
        cerr << "TRANSFORM FOUND" << endl;
      } else {
        cerr << "FAILURE" << endl;
        _globalT = _globalT * guess;
      }
      // Every 50 frames re-orthonormalize the rotation part to counter the
      // numeric drift accumulated by chaining many transforms.
      if (!(_counter % 50)) {
        Eigen::Matrix3f R = _globalT.linear();
        Eigen::Matrix3f E = R.transpose() * R;
        E.diagonal().array() -= 1;
        _globalT.linear() -= 0.5 * R * E;
      }
      _globalT.matrix().row(3) << 0,0,0,1;
      cerr << "globalTransform   : " << t2v(_globalT).transpose() << endl;

      // char buf[1024];
      // sprintf(buf, "frame-%05d.pwn",_counter);
      // frame->save(buf, 1, true, _globalT);


      cerr << "creating alias" << endl;
      // create an alias for the previous node
      MapNodeAlias* newRoot = new MapNodeAlias(_previousSensingFrameNode,_previousSensingFrameNode->manager());
      _previousSensingFrameNode->manager()->addNode(newRoot);
      
      cerr << "creating alias relation" << endl;
      MapNodeAliasRelation* aliasRel = new MapNodeAliasRelation(newRoot,_previousSensingFrameNode->manager());
      aliasRel->nodes()[0] = newRoot;
      aliasRel->nodes()[1] = _previousSensingFrameNode;
      _previousSensingFrameNode->manager()->addRelation(aliasRel);
      
      cerr << "reparenting old elements" << endl;
      // assign all the used relations to the alias node
      if (imu){
	imu->setOwner(newRoot);
	imu->manager()->addRelation(imu);
      }

      if (odom) {
        odom->setOwner(newRoot);
        odom->manager()->addRelation(odom);  // was addRelation(imu), a copy-paste bug
      }
      
      cerr << "adding result" << endl;
      
      Relation* newRel = new Relation(_aligner, _converter,
                                      _previousSensingFrameNode, sensingFrame,
                                      _topic, _aligner->inliers(), _aligner->error(), _manager);
      newRel->setOwner(newRoot);
      newRel->nodes()[0] = newRoot;
      newRel->nodes()[1] = _previousSensingFrameNode;
      Eigen::Isometry3d iso;
      convertScalar(iso,_aligner->T());
      newRel->setTransform(iso);
      newRel->setInformationMatrix(Eigen::Matrix<double, 6,6>::Identity());
      newRel->manager()->addRelation(newRel);
      

      *os << _globalT.translation().transpose() << endl;

      // create a new alias node for the prior element
      // bind the alias with the prior node

      // add to the alias the relations that have been used to
      // determine the transformation

      // add the alias to the map
      // add the new transformation to the map
      
    } else {
      _aligner->setCurrentSensorOffset(sensorOffset);
      _globalT = Eigen::Isometry3f::Identity();
      if (imu){
      	Eigen::Isometry3f mean;
      	convertScalar(mean,imu->transform());
	_globalT = mean;
      }
    }
    
    cerr << "deleting previous frame" << endl;
    if (_previousFrame)
      delete _previousFrame;
    
    cerr << "deleting blob frame" << endl;
    delete blob;

    cerr << "bookkeeping update" << endl;
    _previousSensingFrameNode = sensingFrame;
    _previousFrame = frame;
    _counter++;
    
    cerr << "END ITERATION" << endl;
  }
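
A stand-alone sketch of the drift correction used every 50 frames above: with E = R^T R - I measuring the departure from orthogonality, R - 0.5 * R * E is a first-order Newton step back onto the rotation group.

#include <Eigen/Core>

Eigen::Matrix3f reorthonormalize(const Eigen::Matrix3f &R) {
  Eigen::Matrix3f E = R.transpose() * R;   // identity for a perfect rotation
  E.diagonal().array() -= 1.0f;            // E = R^T R - I
  return R - 0.5f * R * E;
}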