void P3ReplicationGroup::open() throw (ServiceException&) {
  ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%t|%T)INFO: P3ReplicationGroup::open()()\n")));
  m_closed = false;

  // Open the data SAP on the configured default interface with a
  // hard-real-time QoS endpoint at the maximum RT priority.
  list<QoSEndpoint> qosEndpoints;
  //QoSEndpoint hrt(QoS::HRT, CPUQoS::MED_RT_PRIO);
  QoSEndpoint hrtEndpoint(QoS::HRT, CPUQoS::MAX_RT_PRIO);
  qosEndpoints.push_back(hrtEndpoint);

  String itf;
  const bool haveInterface =
      P3Configuration::instance()->lookupValue("DEFAULT_INTERFACE", itf);
  if (!haveInterface) {
    // Without a configured interface the group cannot be opened.
    throw ServiceException(ServiceException::INVALID_ARGUMENT);
  }
  m_dataSAP.open(itf, qosEndpoints);

  // Role-specific initialization.
  if (isPrimary()) {
    openPrimary();   // open as primary
  } else {
    openReplica();   // open as replica
  }
}
double MaxflowInference::segment(PotentialsCache::ConstPtr pc, Eigen::VectorXi* seg) const {
  // -- Sanity checks: the cached potentials must line up with the model
  //    weights, and the output vector must match the node count.
  ROS_ASSERT(pc->numEdgePotentials() == model_.eweights_.rows());
  ROS_ASSERT(pc->numNodePotentials() == model_.nweights_.rows());
  if (seg->rows() != pc->numNodes())
    *seg = VecXi::Zero(pc->numNodes());

  for (size_t idx = 0; idx < pc->node_.size(); ++idx)
    ROS_ASSERT(seg->rows() == pc->node_[idx].rows());

  // Count edges up front so the graph can be allocated in one shot.
  int total_edges = 0;
  for (size_t idx = 0; idx < pc->edge_.size(); ++idx) {
    ROS_ASSERT(seg->rows() == pc->edge_[idx].rows());
    ROS_ASSERT(seg->rows() == pc->edge_[idx].cols());
    total_edges += pc->edge_[idx].nonZeros();
  }

  Graph3d graph(seg->rows(), total_edges);
  graph.add_node(seg->rows());

  // Collapse the cache into one weighted node vector and edge matrix.
  Eigen::VectorXd node_pot(seg->rows());
  SparseMat edge_pot(seg->rows(), seg->rows());
  pc->applyWeights(model_, &edge_pot, &node_pot);

  // -- Source-side terminal weights come from the node potentials.
  for (int idx = 0; idx < node_pot.rows(); ++idx)
    graph.add_tweights(idx, node_pot(idx), 0);

  // -- Edge potentials: assumed symmetric, so only the strict upper
  //    triangle is inserted (each undirected edge once, both directions
  //    given the same capacity).
  for (int outer = 0; outer < edge_pot.outerSize(); ++outer) {
    for (SparseMatrix<double, RowMajor>::InnerIterator it(edge_pot, outer); it; ++it) {
      if (it.col() <= it.row())
        continue;
      ROS_WARN_STREAM_COND(it.value() < 0, "Edgepot weighted sum is negative: " << it.value());
      ROS_FATAL_STREAM_COND(isnan(it.value()), "NaN in edgepot.");
      graph.add_edge(it.row(), it.col(), it.value(), it.value());
    }
  }

  // -- Solve the min-cut / max-flow problem.
  HighResTimer timer("maxflow");
  timer.start();
  double flow = graph.maxflow();
  timer.stop();
  //cout << timer.reportMilliseconds() << endl;

  // -- Translate the cut into a -1 (sink side) / +1 labeling.
  for (int idx = 0; idx < seg->rows(); ++idx) {
    if (graph.what_segment(idx, Graph3d::SINK) == Graph3d::SINK)
      seg->coeffRef(idx) = -1;
    else
      seg->coeffRef(idx) = 1;
  }

  return flow;
}
Cloud::Ptr SlamCalibrator::buildMap(StreamSequenceBase::ConstPtr sseq, const Trajectory& traj, double max_range, double vgsize) { std::cout<<"Building slam calibration map using max range of " << max_range << std::endl; Cloud::Ptr map(new Cloud); int num_used_frames = 0; for(size_t i = 0; i < traj.size(); ++i) { if(!traj.exists(i)) continue; // if(i % traj.size() / 10 == 0) // cout << "." << flush; Frame frame; sseq->readFrame(i, &frame); filterFringe(&frame); Cloud::Ptr tmp(new Cloud); sseq->proj_.frameToCloud(frame, tmp.get(), max_range); Cloud::Ptr nonans(new Cloud); nonans->reserve(tmp->size()); for(size_t j = 0; j < tmp->size(); ++j) if(isFinite(tmp->at(j))) nonans->push_back(tmp->at(j)); pcl::transformPointCloud(*nonans, *nonans, traj.get(i).cast<float>()); *map += *nonans; ++num_used_frames; // Added intermediate filtering to handle memory overload on huge maps if(num_used_frames % 50 == 0) { //cout << "Filtering..." << endl; HighResTimer hrt("filtering"); hrt.start(); pcl::VoxelGrid<Point> vg; vg.setLeafSize(vgsize, vgsize, vgsize); Cloud::Ptr tmp(new Cloud); vg.setInputCloud(map); vg.filter(*tmp); *map = *tmp; hrt.stop(); } } //cout << endl; //cout << "Filtering..." << endl; HighResTimer hrt("filtering"); hrt.start(); pcl::VoxelGrid<Point> vg; vg.setLeafSize(vgsize, vgsize, vgsize); Cloud::Ptr tmp(new Cloud); vg.setInputCloud(map); vg.filter(*tmp); *map = *tmp; hrt.stop(); //cout << hrt.reportMilliseconds() << endl; cout << "Filtered map has " << map->size() << " points." << endl; return map; }