Example #1
/**
 * @param pose The camera pose, populated by this method
 * @return The next DepthImage to be read, or nullptr if none remains
 */
DepthImage * TUMDataLoader::next( Eigen::Matrix4f& pose )  {
	DepthImage * image = nullptr;

	if ( m_current_idx < m_data_records.size() ) {

		struct DATA_RECORD dr = m_data_records[m_current_idx];
		bool is_directory = false;


		if ( file_exists( dr.file_name, is_directory ) && !is_directory ) {
			image = new DepthImage( dr.file_name );

			// TUM depth data uses 5000 units per metre;
			// we want mm, so divide by 5 (scale by 0.2)
			image->scale_depth( 0.2f );
			pose = to_pose( dr.data );
		} else {
			std::cerr << "Couldn't find file " << dr.file_name << std::endl;
		}

		m_current_idx++;

	}
	return image;
}
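A minimal caller for this loader (a hypothetical driver sketch; the directory-based constructor appears in the test near the end of this page). Note that the caller owns the returned image:

void process_all( TUMDataLoader& loader ) {
	Eigen::Matrix4f pose;
	// next() yields nullptr once all data records have been consumed
	while ( DepthImage * image = loader.next( pose ) ) {
		// ... use image and pose here ...
		delete image; // caller owns the returned image
	}
}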
  void PinholePointProjector::project(IntImage &indexImage,
                                      DepthImage &depthImage,
                                      const PointVector &points) const {
    assert(_imageRows && _imageCols && "PinholePointProjector: _imageRows and _imageCols are zero");
    
    indexImage.create(_imageRows, _imageCols);
    depthImage.create(_imageRows, _imageCols);
 
    depthImage.setTo(cv::Scalar(std::numeric_limits<float>::max()));
    indexImage.setTo(cv::Scalar(-1));   

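    // cache per-row data pointers for the scattering loop below
    // (variable-length arrays are a compiler extension in C++)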
    float *drowPtrs[_imageRows];
    int *irowPtrs[_imageRows];
    for(int i = 0; i < _imageRows; i++) {
      drowPtrs[i] = &depthImage(i, 0);
      irowPtrs[i] = &indexImage(i, 0);
    }
    const Point *point = &points[0];
    for(size_t i = 0; i < points.size(); i++, point++) {
      int x, y;
      float d;
      // skip points that fail projection, fall outside the distance
      // range, or land outside the image
      if(!_project(x, y, d, *point) ||
         d < _minDistance || d > _maxDistance ||
         x < 0 || x >= indexImage.cols ||
         y < 0 || y >= indexImage.rows)
        continue;
      float &otherDistance = drowPtrs[y][x];
      int &otherIndex = irowPtrs[y][x];
      // z-buffer test: keep the nearest point for each pixel
      if(!otherDistance || otherDistance > d) {
        otherDistance = d;
        otherIndex = (int)i;
      }
    }
  }
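The heart of project() is a z-buffer: each pixel keeps the smallest projected depth and the index of the point that produced it. A self-contained sketch of that rule (illustrative only, not the library's API):

#include <limits>
#include <vector>

struct ZBufferSketch {
  int rows, cols;
  std::vector<float> depth;
  std::vector<int> index;
  ZBufferSketch(int r, int c)
    : rows(r), cols(c),
      depth(r * c, std::numeric_limits<float>::max()),
      index(r * c, -1) {}
  // keep the nearest point seen so far for each pixel
  void insert(int x, int y, float d, int i) {
    float &best = depth[y * cols + x];
    if(d < best) {
      best = d;
      index[y * cols + x] = i;
    }
  }
};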
DepthImage DepthImage::sparse() {
	//Canny params
	//int edgeThresh = 1;
	int lowThreshold=50;
	//int const max_lowThreshold = 100;
	int ratio = 3;
	int kernel_size = 5;

	DepthImage dImg=*this;
	DepthImage di;
	Mat src_gray,detected_edges,imagec,depth;
	imagec=dImg.getImg();
	/// Convert to grayscale, then reduce noise with a 5x5 Gaussian blur
	cvtColor( imagec, src_gray, CV_BGR2GRAY );
	//blur( src_gray, detected_edges, Size(3,3) );
	GaussianBlur( src_gray, detected_edges, Size(5,5), 5, 5, BORDER_DEFAULT );
	//medianBlur (src_gray, detected_edges, 15 );
	//bilateralFilter (src_gray, detected_edges, 15,15*2, 15/2 );
	/// Canny detector
	Canny( detected_edges, detected_edges, lowThreshold, lowThreshold*ratio, kernel_size );

	Mat depthAll;
	depthAll=dImg.getDepth();
	depth = Scalar::all(0);
	// keep depth values only where Canny detected an edge
	depthAll.copyTo( depth, detected_edges );

	di.setImg( imagec );
	di.setDepth( depth );
//	cout << dImg.getCentroid() << " centroid" << endl;
//	cout << dImg.getPoints3D().size()/1000 << "k filtered points" << endl;
//	cout << di.getCentroid() << " centroid" << endl;
//	cout << di.getPoints3D().size()/1000 << "k filtered points" << endl;
	return di;
}
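The same edge-masking idea as a stand-alone OpenCV sketch (a hypothetical helper, assuming a BGR image and a depth Mat of matching size):

Mat depthAtEdges( const Mat& bgr, const Mat& depthAll ) {
	Mat gray, edges, sparse;
	cvtColor( bgr, gray, CV_BGR2GRAY );
	GaussianBlur( gray, edges, Size(5,5), 5 );
	Canny( edges, edges, 50, 150, 5 ); // high threshold = 3 * low, aperture 5
	sparse = Mat::zeros( depthAll.size(), depthAll.type() );
	depthAll.copyTo( sparse, edges ); // the edge map acts as a mask
	return sparse;
}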
Example #4
bool LcmTranslator::
fromLcm(const drc::map_image_t& iMessage, DepthImageView& oView) {

  // transform from reference to image coords
  Eigen::Projective3f xform;
  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      xform(i,j) = iMessage.transform[i][j];
    }
  }

  // data blob
  DataBlob blob;
  if (!fromLcm(iMessage.blob, blob)) return false;

  // convert to depth image
  float maxVal;
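  // the maximum code of each integer type marks invalid samples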
  switch(blob.getSpec().mDataType) {
  case DataBlob::DataTypeUint8: maxVal = 254; break;
  case DataBlob::DataTypeUint16: maxVal = 65534; break;
  default: maxVal = std::numeric_limits<float>::infinity(); break;
  }
  blob.convertTo(DataBlob::CompressionTypeNone, DataBlob::DataTypeFloat32); 
  float* raw = (float*)(&blob.getBytes()[0]);
  std::vector<float> depths(raw, raw+blob.getBytes().size()/sizeof(float));
  for (int i = 0; i < (int)depths.size(); ++i) {
    if (depths[i] >= maxVal) {
      depths[i] = std::numeric_limits<float>::infinity();
    }
    else {
      depths[i] = iMessage.data_scale*depths[i] + iMessage.data_shift;
    }
  }

  DepthImage img;
  img.setSize(blob.getSpec().mDimensions[0], blob.getSpec().mDimensions[1]);
  img.setProjector(xform);
  img.setData(depths, DepthImage::TypeDepth);
  oView.setId(iMessage.view_id);
  oView.set(img);

  return true;
}
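The per-pixel decode above, in isolation (a sketch; the 65534 cutoff matches the uint16 case):

float decodeDepth( uint16_t value, float scale, float shift ) {
  if (value >= 65534) return std::numeric_limits<float>::infinity();
  return scale * value + shift; // invert the encoder's quantization
}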
void copyFrameToImage(VideoFrameRef frame, DepthImage& image)
{
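	// rebuild the image if the dimensions changed; otherwise reuse the buffer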
	if (image.height() != frame.getHeight() || image.width() != frame.getWidth())
	{
		image = DepthImage(
			DepthImage::Data(
			static_cast<const uint16_t*>(frame.getData()),
			static_cast<const uint16_t*>(frame.getData()) + frame.getWidth() * frame.getHeight()),
			frame.getWidth(),
			frame.getHeight());
	}
	else
	{
		image.data(DepthImage::Data(
			static_cast<const uint16_t*>(frame.getData()),
			static_cast<const uint16_t*>(frame.getData()) + frame.getWidth() * frame.getHeight()));
	}
}
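A hypothetical OpenNI2 capture loop around this helper (assumes an initialized openni::VideoStream named stream):

VideoFrameRef frame;
DepthImage image;
while (stream.readFrame(&frame) == openni::STATUS_OK)
{
	copyFrameToImage(frame, image);
	// ... consume image ...
}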
Example #6
  void ViewerState::addCloudSelected(){
    if(listItem) {
      cerr << "adding a frame" << endl;
      int index = pwnGMW->listWidget->row(listItem);
      cerr << "index: " << endl;
      std::string fname = listAssociations[index]->baseFilename()+"_depth.pgm";
      DepthImage depthImage;
      if (!depthImage.load(fname.c_str(), true)){
      	cerr << " skipping " << fname << endl;
      	newCloudAdded = false;
      	*addCloud = 0;
      	return;
      }
      DepthImage scaledDepthImage;
      DepthImage::scale(scaledDepthImage, depthImage, reduction);
      imageRows = scaledDepthImage.rows();
      imageCols = scaledDepthImage.cols();
      correspondenceFinder->setSize(imageRows, imageCols);
      Frame * frame=new Frame();
      converter->compute(*frame, scaledDepthImage, sensorOffset);
      OptimizableGraph::DataContainer *dc = listAssociations[index]->dataContainer();
      cerr << "datacontainer: " << dc << endl;
      // assumes the association's data container is attached to a VertexSE3
      VertexSE3 *v = dynamic_cast<VertexSE3*>(dc);
      cerr << "vertex of the frame: " << v->id() << endl;

      DrawableFrame* drawableFrame = new DrawableFrame(globalT, drawableFrameParameters, frame);
      drawableFrame->_vertex = v;
      
      Eigen::Isometry3f priorMean;
      Matrix6f priorInfo;
      bool hasImu = extractAbsolutePrior(priorMean, priorInfo, drawableFrame);
      if (hasImu && drawableFrameVector.size() == 0) {
        globalT.linear() = priorMean.linear();
        cerr << "found an IMU prior for the first vertex" << endl;
        drawableFrame->setTransformation(globalT);
      }
      drawableFrameVector.push_back(drawableFrame);
      pwnGMW->viewer_3d->addDrawable(drawableFrame);
    }
    newCloudAdded = true;
    *addCloud = 0;
    _meHasNewFrame = true;
  }
HRESULT ImageReaderSensor::processDepth()
{
	HRESULT hr = S_OK;
	if (m_CurrentFrameNumberDepth >= m_NumFrames) {
		return S_FALSE;
	}
	std::cout << "Processing Depth Frame " << m_CurrentFrameNumberDepth << std::endl;
	char frameNumber_c[10];
	sprintf_s(frameNumber_c,"%06d", m_CurrentFrameNumberDepth+1);
	std::string frameNumber(frameNumber_c);
	std::string currFileName = m_BaseFilename;
	currFileName.append("depth/").append(frameNumber).append(".png");
	DepthImage image;
	FreeImageWrapper::loadImage(currFileName, image);
	image.flipY();
	for (UINT i = 0; i < getDepthWidth() * getDepthHeight(); i++) {
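		// scale to 16-bit depth units (presumably metres to millimetres)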
		m_depthD16[i] = (USHORT)(image.getDataPointer()[i] * 1000);
	}
	m_CurrentFrameNumberDepth++;
	return hr;
}
Example #8
  void DepthImage_scale(DepthImage &dest, const DepthImage &src, int step) {
    int rows = src.rows / step;
    int cols = src.cols / step;
    dest.create(rows, cols);
    dest.setTo(cv::Scalar(0));
    for(int r = 0; r < dest.rows; r++) {
      for(int c = 0; c < dest.cols; c++) {
        float acc = 0;
        int np = 0;
        int sr = r * step;
        int sc = c * step;
        // average the valid (non-zero) depths in each step x step block;
        // zeros add nothing to acc or np, so they are excluded
        for(int i = 0; i < step; i++) {
          for(int j = 0; j < step; j++) {
            if(sr + i < src.rows && sc + j < src.cols) {
              acc += src(sr + i, sc + j);
              np += src(sr + i, sc + j) > 0;
            }
          }
        }
        if(np)
          dest(r, c) = acc / np;
      }
    }
  }
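Usage sketch (assuming the cv::Mat-backed DepthImage used above), halving the resolution while ignoring invalid zero depths:

DepthImage full, half;
full.create(480, 640);
// ... fill 'full' with depth samples, 0 marking invalid pixels ...
DepthImage_scale(half, full, 2); // 'half' becomes 240 x 320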
Example #9
  DepthImageView::Ptr getLatest() {
    // copy data
    bot_core::image_t img;
    Eigen::Isometry3d pose;
    int format;
    {
      std::unique_lock<std::mutex> lock(mDataMutex);
      img = mCurrentImage;
      pose = mCurrentPose;
      format = mCurrentFormat;
    }

    // check whether data is valid
    if (img.utime == 0) return NULL;

    // get data bytes
    std::vector<uint8_t> bytes;
    if (format == bot_core::images_t::DEPTH_MM_ZIPPED) {
      unsigned long uncompressedSize = img.width*img.height*2;
      bytes.resize(uncompressedSize);
      uncompress(bytes.data(), &uncompressedSize,
                 img.data.data(), img.data.size());
    }
    else {
      bytes = img.data;
    }

    // create depth image
    DepthImage depthImage;
    depthImage.setSize(img.width, img.height);
    depthImage.setOrthographic(false);
    depthImage.setPose(pose.cast<float>());
    depthImage.setCalib(mCalibMatrix.cast<float>());

    // convert from uint16 to float and set data in depth image
    std::vector<float> depths(img.width*img.height);
    uint16_t* raw = (uint16_t*)bytes.data();
    for (int i = 0; i < (int)depths.size(); ++i) {
      depths[i] = (raw[i] == 0) ?
        depthImage.getInvalidValue(DepthImage::TypeDepth) : raw[i]/1e3f;
    }
    depthImage.setData(depths, DepthImage::TypeDepth);

    // create and return wrapper view
    DepthImageView::Ptr view(new DepthImageView());
    view->set(depthImage);
    view->setUpdateTime(img.utime);
    return view;
  }
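The zlib call above discards the uncompress() return code; a sketch that checks it (Z_OK is zero on success):

#include <zlib.h>

bool unzipDepth(const std::vector<uint8_t> &in, int width, int height,
                std::vector<uint8_t> &out) {
  unsigned long size = (unsigned long)width * height * 2; // one uint16 per pixel
  out.resize(size);
  return uncompress(out.data(), &size, in.data(), in.size()) == Z_OK;
}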
Example #10
void RGBDSensorWrapper::deepCopyImages(const DepthImage&  src,
                                       sensor_msgs_Image& dest,
                                       const string&      frame_id,
                                       const TickTime&    timeStamp,
                                       const UInt&        seq)
{
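    // copy a YARP depth image into a ROS sensor_msgs/Image, field by field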
    dest.data.resize(src.getRawImageSize());
    
    dest.width           = src.width();
    dest.height          = src.height();
    
    memcpy(dest.data.data(), src.getRawImage(), src.getRawImageSize());
    
    dest.encoding        = yarp2RosPixelCode(src.getPixelCode());
    dest.step            = src.getRowSize();
    dest.header.frame_id = frame_id;
    dest.header.stamp    = timeStamp;
    dest.header.seq      = seq;
    dest.is_bigendian    = 0;
}
Example #11
TEST( Camera, givenDepthMapThenGenerateNormalMap ) {

    using namespace Eigen;

    Camera *cam = Camera::default_depth_camera();

    TUMDataLoader tum{ "/mnt/hgfs/PhD/Kinect Raw Data/TUM/rgbd_dataset_freiburg1_xyz"};


    // Load depth image
    Matrix4f pose;
    DepthImage * di = tum.next( pose );
    ASSERT_TRUE( di != nullptr ); // guard against a missing dataset

    Matrix<float, 3, Dynamic> vertices;
    Matrix<float, 3, Dynamic> normals;

    cam->depth_image_to_vertices_and_normals(di->data(), di->width(), di->height(), vertices, normals);

    save_normals_as_colour_png("/home/dave/Desktop/cam_test_normals_tum.png", di->width(), di->height(), normals);
    save_rendered_scene_as_png("/home/dave/Desktop/cam_test_render_tum.png", di->width(), di->height(), vertices, normals, *cam, Vector3f{10000, 10000, 1000});

    delete di;
    delete cam;
}
int main(int argc, char **argv) {
  float pointSize;
  float pointStep;
  float normalLenght;
  float normalStep;
  float alphaColor;
  int initialSensorOffset;
  string filename;

  // Input parameters handling.
  g2o::CommandArgs arg;
  arg.param("pointSize", pointSize, 1.0f, "Size of the points");
  arg.param("normalLenght", normalLenght, 0, "Lenght of the normals");
  arg.param("alpha", alphaColor, 1.0f, "Alpha channel for points");
  arg.param("pointStep", pointStep, 1, "Step of the points");
  arg.param("normalStep", normalStep, 1, "Step of the normals");
  arg.param("initialSensorOffset", initialSensorOffset, 0, "Choose if use the initial sensor offset");
  arg.paramLeftOver("filename", filename, "", "Input depth image or .pwn file", true);
 
  // Set parser input.
  arg.parseArgs(argc, argv);

  Isometry3f sensorOffsetInit = Isometry3f::Identity();
  if(initialSensorOffset) {
    sensorOffsetInit.translation() = Vector3f(0.15f, 0.0f, 0.05f);
    Quaternionf quaternion = Quaternionf(0.5f, -0.5f, 0.5f, -0.5f);
    sensorOffsetInit.linear() = quaternion.toRotationMatrix();
  }
  sensorOffsetInit.matrix().row(3) << 0.0f, 0.0f, 0.0f, 1.0f;

  // Window name
  static const char CALIBRATE_WINDOW[] = "Calibrate Window";

  // Calibration angles
  int maxTrackBarValue = 1800;
  int alpha = maxTrackBarValue / 2; // Around x
  int beta = maxTrackBarValue / 2;  // Around y
  int theta = maxTrackBarValue / 2; // Around z

  // Create the window and the trackbars
  cv::namedWindow(CALIBRATE_WINDOW);
  cvMoveWindow(CALIBRATE_WINDOW, 0, 0);
  cvResizeWindow(CALIBRATE_WINDOW, 500, 200);
  cvCreateTrackbar("Calibrate X", CALIBRATE_WINDOW, &alpha, 1800, NULL);
  cvCreateTrackbar("Calibrate Y", CALIBRATE_WINDOW, &beta, 1800, NULL);
  cvCreateTrackbar("Calibrate Z", CALIBRATE_WINDOW, &theta, 1800, NULL);  
  
  // Create objects in order to read the input depth image / pwn file
  Eigen::Matrix3f cameraMatrix;
  cameraMatrix << 
    525.0f, 0.0f, 319.5f,
    0.0f, 525.0f, 239.5f,
    0.0f, 0.0f, 1.0f;

  string extension;
  extension = filename.substr(filename.rfind(".") + 1);

  Frame frame;
  Isometry3f globalT = Isometry3f::Identity();
  cerr << "Loading " << filename.c_str() << endl;
  if(extension == "pgm") {
    PinholePointProjector projector;
    projector.setCameraMatrix(cameraMatrix);
    StatsCalculator statsCalculator;
    PointInformationMatrixCalculator pointInformationMatrixCalculator;
    NormalInformationMatrixCalculator normalInformationMatrixCalculator;
    DepthImageConverter depthImageConverter(&projector, &statsCalculator,
                                            &pointInformationMatrixCalculator,
                                            &normalInformationMatrixCalculator);
    
    DepthImage inputImage;
    inputImage.load(filename.c_str(),true);
    
    cerr << "Computing stats... ";
    depthImageConverter.compute(frame, inputImage);
    cerr << " done" << endl;
  }
  else if(extension == "pwn") {
    frame.load(globalT, filename.c_str());
  }
  else {
    cerr << "File extension nor recognized, quitting." << endl;
    return(0);
  }

  Isometry3f oldSensorOffset = Isometry3f::Identity();
  oldSensorOffset.matrix().row(3) << 0.0f, 0.0f, 0.0f, 1.0f;

  QApplication application(argc,argv);
  QWidget* mainWindow = new QWidget();
  mainWindow->setWindowTitle("pwn_calibration");
  QHBoxLayout* hlayout = new QHBoxLayout();
  mainWindow->setLayout(hlayout);
  QVBoxLayout* vlayout = new QVBoxLayout();
  hlayout->addItem(vlayout);
  
  PWNQGLViewer* viewer = new PWNQGLViewer(mainWindow);
  vlayout->addWidget(viewer);
  
  viewer->init();
  viewer->show();
  viewer->setAxisIsDrawn(true);
  mainWindow->show();
  
  GLParameterPoints *pointsParams = new GLParameterPoints(pointSize, Eigen::Vector4f(1.0f, 0.0f, 0.0f, 1.0f));
  pointsParams->setStep(pointStep);
  DrawablePoints *drawablePoints = new DrawablePoints(oldSensorOffset, pointsParams, &frame.points(), &frame.normals());
  viewer->addDrawable(drawablePoints);
  
  GLParameterNormals *normalParams = new GLParameterNormals(pointSize, Eigen::Vector4f(0.0f, 0.0f, 1.0f, alphaColor), normalLenght);
  DrawableNormals *drawableNormals = new DrawableNormals(oldSensorOffset, normalParams, &frame.points(), &frame.normals());
  normalParams->setStep(normalStep);
  normalParams->setNormalLength(normalLenght);
  viewer->addDrawable(drawableNormals);

  // Keep cycling
  Isometry3f sensorOffset;
  while(mainWindow->isVisible()) {
    // Map trackbar positions to angles in radians, roughly [-pi, pi)
    const float pi = 3.14159265f;
    float alphar = 2.0f * pi * ((float)alpha - maxTrackBarValue / 2) / maxTrackBarValue;
    float betar = 2.0f * pi * ((float)beta - maxTrackBarValue / 2) / maxTrackBarValue;
    float thetar = 2.0f * pi * ((float)theta - maxTrackBarValue / 2) / maxTrackBarValue;
    
    // Generate rotations
    Quaternion<float> qx, qy, qz; 
    qx = AngleAxis<float>(alphar, Vector3f(1.0f, 0.0f, 0.0f));
    qy = AngleAxis<float>(betar, Vector3f(0.0f, 1.0f, 0.0f));
    qz = AngleAxis<float>(thetar, Vector3f(0.0f, 0.0f, 1.0f));
    
    Matrix3f totalRotation = qz.toRotationMatrix() * qy.toRotationMatrix() * qx.toRotationMatrix();
    sensorOffset = Isometry3f::Identity();
    sensorOffset.translation() = Vector3f(0.0f, 0.0f, 0.0f);
    sensorOffset.linear() = totalRotation * sensorOffsetInit.linear();
    sensorOffset.matrix().row(3) << 0.0f, 0.0f, 0.0f, 1.0f;

    drawablePoints->setTransformation(sensorOffset);
    drawableNormals->setTransformation(sensorOffset);

    oldSensorOffset = sensorOffset;

    viewer->updateGL();
    application.processEvents();

    cv::waitKey(33);
  }

  ofstream os("sensorOffset.txt");
  Vector6f offset = t2v(sensorOffset);
  os << offset[0] << " " << offset[1] << " " << offset[2] << " " 
     << offset[3] << " " << offset[4] << " " << offset[5] << endl;

  return(0);
}
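The trackbar-to-angle mapping used in the loop above, factored into a helper (a sketch; a slider range of 1800 covers roughly [-pi, pi)):

float trackbarToRadians(int value, int maxValue) {
  const float pi = 3.14159265f;
  return 2.0f * pi * ((float)value - maxValue / 2) / maxValue;
}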
Example #13
bool SavePng(std::string filename, const DepthImage& img)
{
    // Open up a file for writing
    FILE* output = fopen(filename.c_str(), "wb");
    if (output == NULL)
    {
        printf("Failed to open PNG file for writing (errno = %i)\n", errno);
        return false;
    }

    // Create a png pointer with the callbacks above
    png_structp png_ptr = png_create_write_struct(
        PNG_LIBPNG_VER_STRING, NULL, on_png_error, on_png_warn);
    if (png_ptr == NULL)
    {
        fprintf(stderr, "Failed to allocate png write_struct\n");
        fclose(output);
        return false;
    }

    // Create an info pointer
    png_infop info_ptr = png_create_info_struct(png_ptr);
    if (info_ptr == NULL)
    {
        fprintf(stderr, "Failed to create png info_struct");
        png_destroy_write_struct(&png_ptr, &info_ptr);
        fclose(output);
        return false;
    }

    // Set physical vars
    png_set_IHDR(png_ptr, info_ptr, img.cols(), img.rows(), 16,
                 PNG_COLOR_TYPE_GRAY, PNG_INTERLACE_NONE,
                 PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);

    png_init_io(png_ptr, output);

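    // Find the depth range, ignoring -inf (invalid) pixels when taking the
    // minimum: they are temporarily replaced by zmax for minCoeff().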
    const float zmax = img.maxCoeff();
    const float zmin = (img == -std::numeric_limits<float>::infinity())
            .select(DepthImage::Constant(img.rows(), img.cols(), zmax),
                    img)
            .minCoeff();

    auto scaled = (zmax == zmin)
        ? DepthImage((img - zmin) + 65535)
        : DepthImage((img - zmin) * 65534 / (zmax - zmin) + 1);
    Eigen::Array<uint16_t, Eigen::Dynamic, Eigen::Dynamic>
        pixels = scaled.cast<uint16_t>().transpose();

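    // Eigen is column-major, so after the transpose each image row is
    // contiguous; rows are listed bottom-up to flip the image vertically.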
    std::vector<uint16_t*> rows;
    for (int i=pixels.cols() - 1; i >= 0; --i)
    {
        rows.push_back(pixels.data() + i * pixels.rows());
    }

    png_set_rows(png_ptr, info_ptr, reinterpret_cast<png_bytepp>(&rows[0]));
    png_write_png(png_ptr, info_ptr, PNG_TRANSFORM_SWAP_ENDIAN, NULL);
    fclose(output);

    png_destroy_write_struct(&png_ptr, &info_ptr);
    return true;
}
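The grayscale mapping above, in isolation: depths in [zmin, zmax] map to codes [1, 65535], and a degenerate range maps everything to 65535 (a sketch of the same arithmetic):

uint16_t normalizeDepth(float z, float zmin, float zmax) {
    if (zmax == zmin) return 65535;
    return (uint16_t)((z - zmin) * 65534.0f / (zmax - zmin) + 1.0f);
}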
int main (int argc, char** argv) {
    // Handle input
    float pointSize;
    float pointStep;
    float alpha;
    int applyTransform;
    int step;
    string logFilename;
    string configFilename;
    float di_scaleFactor;
    float scale;

    g2o::CommandArgs arg;
    arg.param("vz_pointSize", pointSize, 1.0f, "Size of the points where are visualized");
    arg.param("vz_transform", applyTransform, 1, "Choose if you want to apply the absolute transform of the point clouds");
    arg.param("vz_step", step, 1, "Visualize a point cloud each vz_step point clouds");
    arg.param("vz_alpha", alpha, 1.0f, "Alpha channel value used for the color points");
    arg.param("vz_pointStep", pointStep, 1, "Step at which point are drawn");
    arg.param("vz_scale", scale, 2, "Depth image size reduction factor");
    arg.param("di_scaleFactor", di_scaleFactor, 0.001f, "Scale factor to apply to convert depth images in meters");
    arg.paramLeftOver("configFilename", configFilename, "", "Configuration filename", true);
    arg.paramLeftOver("logFilename", logFilename, "", "Log filename", true);
    arg.parseArgs(argc, argv);

    // Create GUI
    QApplication application(argc,argv);
    QWidget *mainWindow = new QWidget();
    mainWindow->setWindowTitle("pwn_tracker_log_viewer");
    QHBoxLayout *hlayout = new QHBoxLayout();
    mainWindow->setLayout(hlayout);
    QVBoxLayout *vlayout = new QVBoxLayout();
    hlayout->addItem(vlayout);
    QVBoxLayout *vlayout2 = new QVBoxLayout();
    hlayout->addItem(vlayout2);
    hlayout->setStretch(1, 1);

    QListWidget* listWidget = new QListWidget(mainWindow);
    listWidget->setSelectionMode(QAbstractItemView::MultiSelection);
    vlayout->addWidget(listWidget);
    PWNQGLViewer* viewer = new PWNQGLViewer(mainWindow);
    vlayout2->addWidget(viewer);
    Eigen::Isometry3f T;
    T.setIdentity();
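    // ensure the homogeneous row is exactly (0 0 0 1)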
    T.matrix().row(3) << 0.0f, 0.0f, 0.0f, 1.0f;

    // Read config file
    cout << "Loading config file..." << endl;
    Aligner *aligner;
    DepthImageConverter *converter;
    std::vector<Serializable*> instances = readConfig(aligner, converter, configFilename.c_str());
    converter->projector()->scale(1.0f/scale);
    cout << "... done" << endl;

    // Read and parse log file
    std::vector<boss::Serializable*> objects;
    std::vector<PwnTrackerFrame*> trackerFrames;
    std::vector<PwnTrackerRelation*> trackerRelations;
    Deserializer des;
    des.setFilePath(logFilename);
    cout << "Loading log file..." << endl;
    load(trackerFrames, trackerRelations, objects, des, step);
    cout << "... done" << endl;

    // Load the drawable list with the PwnTrackerFrame objects
    std::vector<Frame*> pointClouds;
    pointClouds.resize(trackerFrames.size());
    Frame *dummyFrame = 0;
    std::fill(pointClouds.begin(), pointClouds.end(), dummyFrame);
    for(size_t i = 0; i < trackerFrames.size(); i++) {
        PwnTrackerFrame *pwnTrackerFrame = trackerFrames[i];
        char nummero[1024];
        sprintf(nummero, "%05d", (int)i);
        listWidget->addItem(QString(nummero));
        QListWidgetItem *lastItem = listWidget->item(listWidget->count() - 1);

        Isometry3f transform = Isometry3f::Identity();
        if(applyTransform) {
            isometry3d2f(transform, pwnTrackerFrame->transform());
            transform = transform*pwnTrackerFrame->sensorOffset;
        }
        transform.matrix().row(3) << 0.0f, 0.0f, 0.0f, 1.0f;

        GLParameterFrame *frameParams = new GLParameterFrame();
        frameParams->setStep(pointStep);
        frameParams->setShow(false);
        DrawableFrame* drawableFrame = new DrawableFrame(transform, frameParams, pointClouds[i]);
        viewer->addDrawable(drawableFrame);
    }

    // Manage GUI
    viewer->init();
    mainWindow->show();
    viewer->show();
    listWidget->show();
    viewer->setAxisIsDrawn(true);
    bool selectionChanged = false;
    QListWidgetItem *item = 0;
    DepthImage depthImage;
    DepthImage scaledDepthImage;
    while (mainWindow->isVisible()) {
        selectionChanged = false;
        for (int i = 0; i<listWidget->count(); i++) {
            item = 0;
            item = listWidget->item(i);
            DrawableFrame *drawableFrame = dynamic_cast<DrawableFrame*>(viewer->drawableList()[i]);
            if (item && item->isSelected()) {
                if(!drawableFrame->parameter()->show()) {
                    drawableFrame->parameter()->setShow(true);
                    selectionChanged = true;
                }
                if(pointClouds[i] == 0) {
                    pointClouds[i] = new Frame();
                    boss_map::ImageBLOB* fromDepthBlob = trackerFrames[i]->depthImage.get();
                    DepthImage depthImage;
                    depthImage.fromCvMat(fromDepthBlob->cvImage());
                    DepthImage::scale(scaledDepthImage, depthImage, scale);
                    converter->compute(*pointClouds[i], scaledDepthImage);
                    drawableFrame->setFrame(pointClouds[i]);
                    delete fromDepthBlob;
                }
            } else {
                drawableFrame->parameter()->setShow(false);
                selectionChanged = true;
            }
        }
        if (selectionChanged)
            viewer->updateGL();

        application.processEvents();
    }

    return 0;
}
  void TwoDepthImageAlignerNode::processNode(MapNode* node_){
   cerr << "START ITERATION" << endl;
   std::vector<Serializable*> crearedObjects;
    SensingFrameNode* sensingFrame = dynamic_cast<SensingFrameNode*>(node_);
    if (! sensingFrame)
      return;
    
    PinholeImageData* image = dynamic_cast<PinholeImageData*>(sensingFrame->sensorData(_topic));
    if (! image)
      return;
    cerr << "got image"  << endl;
    
    Eigen::Isometry3d _sensorOffset = _config->sensorOffset(image->baseSensor());
    
    // cerr << "sensorOffset: " << endl;
    // cerr << _sensorOffset.matrix() << endl;

    Eigen::Isometry3f sensorOffset;
    convertScalar(sensorOffset,_sensorOffset);
    sensorOffset.matrix().row(3) << 0,0,0,1;

    Eigen::Matrix3d _cameraMatrix = image->cameraMatrix();
    
    ImageBLOB* blob = image->imageBlob().get();
    
    DepthImage depthImage;
    depthImage.fromCvMat(blob->cvImage());
    int r=depthImage.rows();
    int c=depthImage.cols();
    
    DepthImage scaledImage;
    Eigen::Matrix3f cameraMatrix;
    convertScalar(cameraMatrix,_cameraMatrix);
    
    //computeScaledParameters(r,c,cameraMatrix,_scale);
    PinholePointProjector* projector=dynamic_cast<PinholePointProjector*>(_converter->projector());
    cameraMatrix(2,2)=1;
    projector->setCameraMatrix(cameraMatrix);
    projector->setImageSize(depthImage.rows(), depthImage.cols());
    projector->scale(1.0/_scale);

    DepthImage::scale(scaledImage,depthImage,_scale);
    pwn::Frame* frame = new pwn::Frame;
    _converter->compute(*frame,scaledImage, sensorOffset);
  
    MapNodeBinaryRelation* odom=0;

    std::vector<MapNode*> oneNode(1);
    oneNode[0]=sensingFrame;
    MapNodeUnaryRelation* imu = extractRelation<MapNodeUnaryRelation>(oneNode);
    
    if (_previousFrame){
      _aligner->setReferenceSensorOffset(_aligner->currentSensorOffset());
      _aligner->setCurrentSensorOffset(sensorOffset);
      _aligner->setReferenceFrame(_previousFrame);
      _aligner->setCurrentFrame(frame);
      
      PinholePointProjector* projector=(PinholePointProjector*)(_aligner->projector());
      projector->setCameraMatrix(cameraMatrix);
      projector->setImageSize(depthImage.rows(), depthImage.cols());
      projector->scale(1.0/_scale);
      _aligner->correspondenceFinder()->setImageSize(projector->imageRows(), projector->imageCols());

      /*
	cerr << "correspondenceFinder: "  << r << " " << c << endl; 
	cerr << "sensorOffset" << endl;
	cerr <<_aligner->currentSensorOffset().matrix() << endl;
	cerr <<_aligner->referenceSensorOffset().matrix() << endl;
	cerr << "cameraMatrix" << endl;
	cerr << projector->cameraMatrix() << endl;
      */

      std::vector<MapNode*> twoNodes(2);
      twoNodes[0]=_previousSensingFrameNode;
      twoNodes[1]=sensingFrame;
      odom = extractRelation<MapNodeBinaryRelation>(twoNodes);
      cerr << "odom:" << odom << " imu:" << imu << endl;

      Eigen::Isometry3f guess= Eigen::Isometry3f::Identity();
      _aligner->clearPriors();
      if (odom){
      	Eigen::Isometry3f mean;
      	Eigen::Matrix<float,6,6> info;
      	convertScalar(mean,odom->transform());
	mean.matrix().row(3) << 0,0,0,1;
	convertScalar(info,odom->informationMatrix());
	//cerr << "odom: " << t2v(mean).transpose() << endl;
	_aligner->addRelativePrior(mean,info);
 	//guess = mean;
      } 

      if (imu){
      	Eigen::Isometry3f mean;
      	Eigen::Matrix<float,6,6> info;
      	convertScalar(mean,imu->transform());
      	convertScalar(info,imu->informationMatrix());
	mean.matrix().row(3) << 0,0,0,1;
	//cerr << "imu: " << t2v(mean).transpose() << endl;
	_aligner->addAbsolutePrior(_globalT,mean,info);
      }
      _aligner->setInitialGuess(guess);
      cerr << "Frames: " << _previousFrame << " " << frame << endl;

      
      // projector->setCameraMatrix(cameraMatrix);
      // projector->setTransform(Eigen::Isometry3f::Identity());
      // Eigen::MatrixXi debugIndices(r,c);
      // DepthImage debugImage(r,c);
      // projector->project(debugIndices, debugImage, frame->points());

      _aligner->align();
      
      // sprintf(buf, "img-dbg-%05d.pgm",j);
      // debugImage.save(buf);
      //sprintf(buf, "img-ref-%05d.pgm",j);
      //_aligner->correspondenceFinder()->referenceDepthImage().save(buf);
      //sprintf(buf, "img-cur-%05d.pgm",j);
      //_aligner->correspondenceFinder()->currentDepthImage().save(buf);

      cerr << "inliers: " << _aligner->inliers() << endl;
      cerr << "chi2: " << _aligner->error() << endl;
      cerr << "chi2/inliers: " << _aligner->error()/_aligner->inliers() << endl;
      cerr << "initialGuess: " << t2v(guess).transpose() << endl;
      cerr << "transform   : " << t2v(_aligner->T()).transpose() << endl;
      if (_aligner->inliers()>-1){ // inlier threshold effectively disabled
 	_globalT = _globalT*_aligner->T();
	cerr << "TRANSFORM FOUND" <<  endl;
      } else {
	cerr << "FAILURE" <<  endl;
	_globalT = _globalT*guess;
      }
      if (! (_counter%50) ) {
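	// every 50 frames re-orthonormalize the rotation to curb numerical
	// drift: R <- R - 0.5 * R * (R^T R - I)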
	Eigen::Matrix3f R = _globalT.linear();
	Eigen::Matrix3f E = R.transpose() * R;
	E.diagonal().array() -= 1;
	_globalT.linear() -= 0.5 * R * E;
      }
      _globalT.matrix().row(3) << 0,0,0,1;
      cerr << "globalTransform   : " << t2v(_globalT).transpose() << endl;

      // char buf[1024];
      // sprintf(buf, "frame-%05d.pwn",_counter);
      // frame->save(buf, 1, true, _globalT);


      cerr << "creating alias" << endl;
      // create an alias for the previous node
      MapNodeAlias* newRoot = new MapNodeAlias(_previousSensingFrameNode,_previousSensingFrameNode->manager());
      _previousSensingFrameNode->manager()->addNode(newRoot);
      
      cerr << "creating alias relation" << endl;
      MapNodeAliasRelation* aliasRel = new MapNodeAliasRelation(newRoot,_previousSensingFrameNode->manager());
      aliasRel->nodes()[0] = newRoot;
      aliasRel->nodes()[1] = _previousSensingFrameNode;
      _previousSensingFrameNode->manager()->addRelation(aliasRel);
      
      cerr << "reparenting old elements" << endl;
      // assign all the used relations to the alias node
      if (imu){
	imu->setOwner(newRoot);
	imu->manager()->addRelation(imu);
      }

      if (odom){
	odom->setOwner(newRoot);
	odom->manager()->addRelation(odom); // was addRelation(imu): copy-paste bug
      }
      
      cerr << "adding result" << endl;
      
      Relation* newRel = new Relation(_aligner, _converter, 
	       _previousSensingFrameNode, sensingFrame,
	       _topic, _aligner->inliers(), _aligner->error(), _manager);
      newRel->setOwner(newRoot);
      newRel->nodes()[0] = newRoot;
      newRel->nodes()[1] = _previousSensingFrameNode;
      Eigen::Isometry3d iso;
      convertScalar(iso,_aligner->T());
      newRel->setTransform(iso);
      newRel->setInformationMatrix(Eigen::Matrix<double, 6,6>::Identity());
      newRel->manager()->addRelation(newRel);
      

      *os << _globalT.translation().transpose() << endl;

      // create a new alias node for the prior element
      // bind the alias with the prior node

      // add to the alias the relations that have been used to
      // determine the transformation

      // add the alias to the map
      // add the new transformation to the map
      
    } else {
      _aligner->setCurrentSensorOffset(sensorOffset);
      _globalT = Eigen::Isometry3f::Identity();
      if (imu){
      	Eigen::Isometry3f mean;
      	convertScalar(mean,imu->transform());
	_globalT = mean;
      }
    }
    
    cerr << "deleting previous frame" << endl;
    if (_previousFrame)
      delete _previousFrame;
    
    cerr << "deleting blob frame" << endl;
    delete blob;

    cerr << "bookkeeping update" << endl;
    _previousSensingFrameNode = sensingFrame;
    _previousFrame = frame;
    _counter++;
    
    cerr << "END ITERATION" << endl;
  }