int main(int argc, char** argv) {
  // Handle input
  float pointSize;
  int pointStep;
  float alpha;
  int applyTransform;
  int step;
  string logFilename;
  string configFilename;
  float di_scaleFactor;
  float scale;

  g2o::CommandArgs arg;
  arg.param("vz_pointSize", pointSize, 1.0f, "Size of the points when they are visualized");
  arg.param("vz_transform", applyTransform, 1, "Choose if you want to apply the absolute transform of the point clouds");
  arg.param("vz_step", step, 1, "Visualize one point cloud every vz_step point clouds");
  arg.param("vz_alpha", alpha, 1.0f, "Alpha channel value used for the cloud points");
  arg.param("vz_pointStep", pointStep, 1, "Step at which points are drawn");
  arg.param("vz_scale", scale, 2.0f, "Depth image size reduction factor");
  arg.param("di_scaleFactor", di_scaleFactor, 0.001f, "Scale factor used to convert depth images to meters");
  arg.paramLeftOver("configFilename", configFilename, "", "Configuration filename", true);
  arg.paramLeftOver("logFilename", logFilename, "", "Log filename", true);
  arg.parseArgs(argc, argv);

  // Create the GUI: a list of frames on the left, the 3D viewer on the right
  QApplication application(argc, argv);
  QWidget *mainWindow = new QWidget();
  mainWindow->setWindowTitle("pwn_tracker_log_viewer");
  QHBoxLayout *hlayout = new QHBoxLayout();
  mainWindow->setLayout(hlayout);
  QVBoxLayout *vlayout = new QVBoxLayout();
  hlayout->addItem(vlayout);
  QVBoxLayout *vlayout2 = new QVBoxLayout();
  hlayout->addItem(vlayout2);
  hlayout->setStretch(1, 1);
  QListWidget *listWidget = new QListWidget(mainWindow);
  listWidget->setSelectionMode(QAbstractItemView::MultiSelection);
  vlayout->addWidget(listWidget);
  PWNQGLViewer *viewer = new PWNQGLViewer(mainWindow);
  vlayout2->addWidget(viewer);
  Eigen::Isometry3f T;
  T.setIdentity();
  T.matrix().row(3) << 0.0f, 0.0f, 0.0f, 1.0f;

  // Read the config file and build the aligner and the depth image converter
  cout << "Loading config file..." << endl;
  Aligner *aligner;
  DepthImageConverter *converter;
  std::vector<Serializable*> instances = readConfig(aligner, converter, configFilename.c_str());
  converter->projector()->scale(1.0f / scale);
  cout << "... done" << endl;

  // Read and parse the log file
  std::vector<boss::Serializable*> objects;
  std::vector<PwnTrackerFrame*> trackerFrames;
  std::vector<PwnTrackerRelation*> trackerRelations;
  Deserializer des;
  des.setFilePath(logFilename);
  cout << "Loading log file..." << endl;
  load(trackerFrames, trackerRelations, objects, des, step);
  cout << "... done" << endl;

  // Load the drawable list with the PwnTrackerFrame objects.
  // The point clouds are not decoded yet: each entry starts as a null Frame
  // pointer and is filled lazily the first time its list item is selected.
  std::vector<Frame*> pointClouds;
  pointClouds.resize(trackerFrames.size());
  Frame *dummyFrame = 0;
  std::fill(pointClouds.begin(), pointClouds.end(), dummyFrame);
  for (size_t i = 0; i < trackerFrames.size(); i++) {
    PwnTrackerFrame *pwnTrackerFrame = trackerFrames[i];
    char nummero[1024];
    sprintf(nummero, "%05d", (int)i);
    listWidget->addItem(QString(nummero));
    QListWidgetItem *lastItem = listWidget->item(listWidget->count() - 1);
    Isometry3f transform = Isometry3f::Identity();
    if (applyTransform) {
      isometry3d2f(transform, pwnTrackerFrame->transform());
      transform = transform * pwnTrackerFrame->sensorOffset;
    }
    transform.matrix().row(3) << 0.0f, 0.0f, 0.0f, 1.0f;
    GLParameterFrame *frameParams = new GLParameterFrame();
    frameParams->setStep(pointStep);
    frameParams->setShow(false);
    DrawableFrame *drawableFrame = new DrawableFrame(transform, frameParams, pointClouds[i]);
    viewer->addDrawable(drawableFrame);
  }

  // Manage the GUI: show/hide clouds according to the list selection
  viewer->init();
  mainWindow->show();
  viewer->show();
  listWidget->show();
  viewer->setAxisIsDrawn(true);
  bool selectionChanged = false;
  QListWidgetItem *item = 0;
  DepthImage scaledDepthImage;
  while (mainWindow->isVisible()) {
    selectionChanged = false;
    for (int i = 0; i < listWidget->count(); i++) {
      item = listWidget->item(i);
      DrawableFrame *drawableFrame = dynamic_cast<DrawableFrame*>(viewer->drawableList()[i]);
      if (item && item->isSelected()) {
        if (!drawableFrame->parameter()->show()) {
          drawableFrame->parameter()->setShow(true);
          selectionChanged = true;
        }
        if (pointClouds[i] == 0) {
          // Decode the depth BLOB and convert it to a point cloud on first selection
          pointClouds[i] = new Frame();
          boss_map::ImageBLOB *fromDepthBlob = trackerFrames[i]->depthImage.get();
          DepthImage depthImage;
          depthImage.fromCvMat(fromDepthBlob->cvImage());
          DepthImage::scale(scaledDepthImage, depthImage, scale);
          converter->compute(*pointClouds[i], scaledDepthImage);
          drawableFrame->setFrame(pointClouds[i]);
          delete fromDepthBlob;
        }
      } else if (drawableFrame->parameter()->show()) {
        drawableFrame->parameter()->setShow(false);
        selectionChanged = true;
      }
    }
    if (selectionChanged)
      viewer->updateGL();
    application.processEvents();
  }

  return 0;
}
void TwoDepthImageAlignerNode::processNode(MapNode* node_) {
  cerr << "START ITERATION" << endl;
  std::vector<Serializable*> createdObjects; // currently unused
  SensingFrameNode* sensingFrame = dynamic_cast<SensingFrameNode*>(node_);
  if (!sensingFrame)
    return;

  PinholeImageData* image = dynamic_cast<PinholeImageData*>(sensingFrame->sensorData(_topic));
  if (!image)
    return;
  cerr << "got image" << endl;

  // Sensor offset and camera matrix of the incoming pinhole image
  Eigen::Isometry3d _sensorOffset = _config->sensorOffset(image->baseSensor());
  // cerr << "sensorOffset: " << endl;
  // cerr << _sensorOffset.matrix() << endl;
  Eigen::Isometry3f sensorOffset;
  convertScalar(sensorOffset, _sensorOffset);
  sensorOffset.matrix().row(3) << 0, 0, 0, 1;
  Eigen::Matrix3d _cameraMatrix = image->cameraMatrix();

  // Decode the depth image from its BLOB and build the scaled point cloud
  ImageBLOB* blob = image->imageBlob().get();
  DepthImage depthImage;
  depthImage.fromCvMat(blob->cvImage());
  int r = depthImage.rows();
  int c = depthImage.cols();
  DepthImage scaledImage;
  Eigen::Matrix3f cameraMatrix;
  convertScalar(cameraMatrix, _cameraMatrix);
  // computeScaledParameters(r, c, cameraMatrix, _scale);
  PinholePointProjector* projector = dynamic_cast<PinholePointProjector*>(_converter->projector());
  cameraMatrix(2, 2) = 1;
  projector->setCameraMatrix(cameraMatrix);
  projector->setImageSize(depthImage.rows(), depthImage.cols());
  projector->scale(1.0 / _scale);
  DepthImage::scale(scaledImage, depthImage, _scale);
  pwn::Frame* frame = new pwn::Frame;
  _converter->compute(*frame, scaledImage, sensorOffset);

  // Gather the odometry and IMU relations attached to this sensing frame
  MapNodeBinaryRelation* odom = 0;
  std::vector<MapNode*> oneNode(1);
  oneNode[0] = sensingFrame;
  MapNodeUnaryRelation* imu = extractRelation<MapNodeUnaryRelation>(oneNode);

  if (_previousFrame) {
    // Align the current cloud to the previous one
    _aligner->setReferenceSensorOffset(_aligner->currentSensorOffset());
    _aligner->setCurrentSensorOffset(sensorOffset);
    _aligner->setReferenceFrame(_previousFrame);
    _aligner->setCurrentFrame(frame);
    PinholePointProjector* alignerProjector = (PinholePointProjector*)(_aligner->projector());
    alignerProjector->setCameraMatrix(cameraMatrix);
    alignerProjector->setImageSize(depthImage.rows(), depthImage.cols());
    alignerProjector->scale(1.0 / _scale);
    _aligner->correspondenceFinder()->setImageSize(alignerProjector->imageRows(), alignerProjector->imageCols());
    /*
    cerr << "correspondenceFinder: " << r << " " << c << endl;
    cerr << "sensorOffset" << endl;
    cerr << _aligner->currentSensorOffset().matrix() << endl;
    cerr << _aligner->referenceSensorOffset().matrix() << endl;
    cerr << "cameraMatrix" << endl;
    cerr << alignerProjector->cameraMatrix() << endl;
    */

    std::vector<MapNode*> twoNodes(2);
    twoNodes[0] = _previousSensingFrameNode;
    twoNodes[1] = sensingFrame;
    odom = extractRelation<MapNodeBinaryRelation>(twoNodes);
    cerr << "odom:" << odom << " imu:" << imu << endl;

    // Feed the available odometry (relative) and IMU (absolute) priors to the aligner
    Eigen::Isometry3f guess = Eigen::Isometry3f::Identity();
    _aligner->clearPriors();
    if (odom) {
      Eigen::Isometry3f mean;
      Eigen::Matrix<float, 6, 6> info;
      convertScalar(mean, odom->transform());
      mean.matrix().row(3) << 0, 0, 0, 1;
      convertScalar(info, odom->informationMatrix());
      // cerr << "odom: " << t2v(mean).transpose() << endl;
      _aligner->addRelativePrior(mean, info);
      // guess = mean;
    }
    if (imu) {
      Eigen::Isometry3f mean;
      Eigen::Matrix<float, 6, 6> info;
      convertScalar(mean, imu->transform());
      convertScalar(info, imu->informationMatrix());
      mean.matrix().row(3) << 0, 0, 0, 1;
      // cerr << "imu: " << t2v(mean).transpose() << endl;
      _aligner->addAbsolutePrior(_globalT, mean, info);
    }
    _aligner->setInitialGuess(guess);
    cerr << "Frames: " << _previousFrame << " " << frame << endl;

    // projector->setCameraMatrix(cameraMatrix);
    // projector->setTransform(Eigen::Isometry3f::Identity());
    // Eigen::MatrixXi debugIndices(r, c);
    // DepthImage debugImage(r, c);
    // projector->project(debugIndices, debugImage, frame->points());
    _aligner->align();
    // sprintf(buf, "img-dbg-%05d.pgm", j);
    // debugImage.save(buf);
    // sprintf(buf, "img-ref-%05d.pgm", j);
    // _aligner->correspondenceFinder()->referenceDepthImage().save(buf);
    // sprintf(buf, "img-cur-%05d.pgm", j);
    // _aligner->correspondenceFinder()->currentDepthImage().save(buf);

    cerr << "inliers: " << _aligner->inliers() << endl;
    cerr << "chi2: " << _aligner->error() << endl;
    cerr << "chi2/inliers: " << _aligner->error() / _aligner->inliers() << endl;
    cerr << "initialGuess: " << t2v(guess).transpose() << endl;
    cerr << "transform : " << t2v(_aligner->T()).transpose() << endl;

    // Note: with inliers() > -1 this test always succeeds, so the failure branch
    // below is never taken as written; it only becomes active with a stricter threshold.
    if (_aligner->inliers() > -1) {
      _globalT = _globalT * _aligner->T();
      cerr << "TRANSFORM FOUND" << endl;
    } else {
      cerr << "FAILURE" << endl;
      _globalT = _globalT * guess;
    }

    // Every 50 frames re-orthonormalize the rotation part of the accumulated
    // transform with a first-order correction (a standalone sketch of this step
    // follows the function)
    if (!(_counter % 50)) {
      Eigen::Matrix3f R = _globalT.linear();
      Eigen::Matrix3f E = R.transpose() * R;
      E.diagonal().array() -= 1;
      _globalT.linear() -= 0.5 * R * E;
    }
    _globalT.matrix().row(3) << 0, 0, 0, 1;
    cerr << "globalTransform : " << t2v(_globalT).transpose() << endl;

    // char buf[1024];
    // sprintf(buf, "frame-%05d.pwn", _counter);
    // frame->save(buf, 1, true, _globalT);

    cerr << "creating alias" << endl;
    // create an alias for the previous node
    MapNodeAlias* newRoot = new MapNodeAlias(_previousSensingFrameNode, _previousSensingFrameNode->manager());
    _previousSensingFrameNode->manager()->addNode(newRoot);
    cerr << "creating alias relation" << endl;
    MapNodeAliasRelation* aliasRel = new MapNodeAliasRelation(newRoot, _previousSensingFrameNode->manager());
    aliasRel->nodes()[0] = newRoot;
    aliasRel->nodes()[1] = _previousSensingFrameNode;
    _previousSensingFrameNode->manager()->addRelation(aliasRel);

    cerr << "reparenting old elements" << endl;
    // assign all the used relations to the alias node
    if (imu) {
      imu->setOwner(newRoot);
      imu->manager()->addRelation(imu);
    }
    if (odom) {
      odom->setOwner(newRoot);
      odom->manager()->addRelation(odom);
    }

    cerr << "adding result" << endl;
    Relation* newRel = new Relation(_aligner, _converter,
                                    _previousSensingFrameNode, sensingFrame,
                                    _topic, _aligner->inliers(), _aligner->error(), _manager);
    newRel->setOwner(newRoot);
    newRel->nodes()[0] = newRoot;
    newRel->nodes()[1] = _previousSensingFrameNode;
    Eigen::Isometry3d iso;
    convertScalar(iso, _aligner->T());
    newRel->setTransform(iso);
    newRel->setInformationMatrix(Eigen::Matrix<double, 6, 6>::Identity());
    newRel->manager()->addRelation(newRel);
    *os << _globalT.translation().transpose() << endl;

    // create a new alias node for the prior element
    // bind the alias with the prior node
    // add to the alias the relations that have been used to determine the transformation
    // add the alias to the map
    // add the new transformation to the map
  } else {
    // First frame: initialize the global transform (from the IMU, if available)
    _aligner->setCurrentSensorOffset(sensorOffset);
    _globalT = Eigen::Isometry3f::Identity();
    if (imu) {
      Eigen::Isometry3f mean;
      convertScalar(mean, imu->transform());
      _globalT = mean;
    }
  }

  cerr << "deleting previous frame" << endl;
  if (_previousFrame)
    delete _previousFrame;
  cerr << "deleting blob frame" << endl;
  delete blob;
  cerr << "bookkeeping update" << endl;
  _previousSensingFrameNode = sensingFrame;
  _previousFrame = frame;
  _counter++;
  cerr << "END ITERATION" << endl;
}
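/* processNode() above keeps the incrementally composed pose _globalT from drifting
   off SO(3): every 50 frames it applies a first-order re-orthonormalization of the
   rotation block, R <- R - 0.5 * R * (R^T R - I). Below is a self-contained sketch
   of that same step, assuming only Eigen; the helper name reorthonormalizeRotation
   is hypothetical. */

#include <Eigen/Geometry>

// First-order re-orthonormalization of the linear part of an isometry, mirroring
// the periodic correction applied to _globalT in processNode().
inline void reorthonormalizeRotation(Eigen::Isometry3f& iso) {
  Eigen::Matrix3f R = iso.linear();
  Eigen::Matrix3f E = R.transpose() * R;          // measures the departure from orthonormality
  E.diagonal().array() -= 1.0f;                   // E = R^T R - I
  iso.linear() -= 0.5f * R * E;                   // first-order correction toward SO(3)
  iso.matrix().row(3) << 0.0f, 0.0f, 0.0f, 1.0f;  // keep a valid homogeneous last row
}

// Calling reorthonormalizeRotation(_globalT) every few tens of frames performs the
// same cleanup as the inline block guarded by (_counter % 50) above.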