int main(int argc, char ** argv){
  loadData(data, true);
  loadTestData();
  loadUserItemData(users, items, data);

  for(int i = 0; i < data.size(); ++i)
    nu[data[i].user]++;

#define K 7
  vector< vector< map<int,float> > > tests(K);
  vector< float > weight(K);

  // load the individual predictors and their initial blend weights
  int k = 0;
  loadResults("../ret2/results-knni-iuf.txt.0", tests[k]); weight[k] = 1;    ++k;
  loadResults("../ret2/results-knnu-iif.txt.0", tests[k]); weight[k] = 1;    ++k;
  loadResults("../ret2/results-knnui.txt.0",    tests[k]); weight[k] = 1;    ++k;
  loadResults("../ret2/results-language.txt.0", tests[k]); weight[k] = 0.4;  ++k;
  loadResults("../ret2/results-repos.txt.0",    tests[k]); weight[k] = 0.4;  ++k;
  loadResults("../ret2/results-reponame.txt.0", tests[k]); weight[k] = 0.2;  ++k;
  loadResults("../ret2/results-pop-nic.txt.0",  tests[k]); weight[k] = 0.01; ++k;

  srand(time(0));

  // random hill climbing: perturb one weight per step and keep the change
  // only if the bagged score improves
  int j = 0;
  for(int step = 0; step < 20; ++step){
    int i = j % K;
    ++j;
    float w0 = weight[i];
    int r0 = bag(tests, weight);
    weight[i] *= (1 + 0.5 * (rand01() - 0.5));
    int r1 = bag(tests, weight);
    if(r1 <= r0)
      weight[i] = w0; // no improvement: revert the perturbation
    cout << step << "\t" << r0 << "\t" << r1 << endl;
  }

  for(int i = 0; i < weight.size(); ++i)
    cout << i << "\t" << weight[i] << endl;

  return 0;
}
void put(property_map<treedec::grtdprinter<G>, vertex_all_t, void>& m, size_t v,
         std::set<U> const& p)
{
    auto& b = bag(v, m._g);
    for(auto i : p){ untested();
        treedec::push(b, i);
    }
}
void put(boost::property_map<treedec::grtdprinter<G>, vertex_all_t>& m,
         long unsigned int& v,
         property<treedec::bag_t, std::vector<U> > const& p)
{
    auto& b = bag(v, m._g);
    for(auto i : p.m_value){
        treedec::push(b, i);
    }
}
#include <cstdio>
#include <vector>

int main(){
  int N, S, K;
  scanf("%d%d%d", &N, &S, &K);
  S -= K * (N * ~-N / 2);   // ~-N == N-1, so this subtracts K * N*(N-1)/2
  if(S < 0){
    puts("0");
  }else{
    std::vector<int> bag(S + 1);
    bag[0] = 1;
    for(int i = 1; i <= N; i++)
      for(int j = i; j <= S; j++)
        bag[j] = (bag[j - i] + bag[j]) % 1000000007;
    printf("%d\n", bag[S]);
  }
}
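// The inner double loop above is the standard unbounded partition recurrence:
// after processing part size i, bag[j] holds the number of ways to write j as a
// sum of parts from {1, ..., i}, modulo 1e9+7. A minimal standalone sketch of
// just that recurrence, with hypothetical hard-coded values instead of stdin:
#include <cstdio>
#include <vector>

int main(){
  const long long MOD = 1000000007;
  int n = 4, s = 10;                        // hypothetical example input
  std::vector<long long> ways(s + 1, 0);
  ways[0] = 1;                              // one way to make 0: the empty sum
  for(int part = 1; part <= n; ++part)
    for(int j = part; j <= s; ++j)
      ways[j] = (ways[j] + ways[j - part]) % MOD;
  std::printf("%lld\n", ways[s]);           // partitions of 10 into parts <= 4: prints 23
  return 0;
}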
bool PolicyImprovementLoop::writePolicyImprovementStatistics(
    const policy_improvement_loop::PolicyImprovementStatistics& stats_msg)
{
  std::string directory_name = std::string("/tmp/pi2_statistics/");
  std::string file_name = directory_name;
  file_name.append(std::string("pi2_statistics.bag"));

  if (!boost::filesystem::exists(directory_name))
  {
    if (stats_msg.iteration == 1)
    {
      boost::filesystem::remove_all(directory_name);
    }
    ROS_INFO("Creating directory %s...", directory_name.c_str());
    ROS_VERIFY(boost::filesystem::create_directories(directory_name));
  }

  try
  {
    if (stats_msg.iteration == 1)
    {
      // first iteration: start a fresh bag
      rosbag::Bag bag(file_name, rosbag::bagmode::Write);
      bag.write(PI_STATISTICS_TOPIC_NAME, ros::Time::now(), stats_msg);
      bag.close();
    }
    else
    {
      // later iterations: append to the existing bag
      rosbag::Bag bag(file_name, rosbag::bagmode::Append);
      bag.write(PI_STATISTICS_TOPIC_NAME, ros::Time::now(), stats_msg);
      bag.close();
    }
  }
  catch (const rosbag::BagIOException& ex)
  {
    ROS_ERROR("Could not write to bag file %s: %s", file_name.c_str(), ex.what());
    return false;
  }
  return true;
}
Scriptable * Scriptable::Instantiate() const {
  Scriptable * clone = new Scriptable;
  clone->m_eventHandlers = m_eventHandlers;
  clone->m_scopes.InstantiateFrom(m_scopes);
  clone->m_bindings = m_bindings;

  if(m_eventHandlers)
    m_eventHandlers->IncRef();

  BindingPropertyBag bag(clone);
  clone->m_scopes.SetBindings(bag);

  return clone;
}
ResultCode ActionSet::Execute(ScriptWorld * world, Scriptable * target, unsigned contextScope,
                              const IFormulaPropertyBag * optionalContext) const {
  m_scopeCache->Clear();
  m_scopeCache->SetProperties(&target->GetScopes().GetProperties());

  BindingPropertyBag bag(target);
  m_scopeCache->SetBindings(bag);

  if (optionalContext) {
    m_scopeCache->GetScopes().AddScope(contextScope, *optionalContext);
    optionalContext->PopulateNamedBindings(m_scopeCache);
  }

  m_scopeCache->SetWorld(world);

  return Execute(world, target, *m_scopeCache);
}
GUBIControl::GUBIControl(AppWindow* window, AppEventListener* appListener)
  : NotCopyable()
{
  m_impl = new Impl;
  m_impl->window = window;
  m_impl->time = new gubi::TimeDefault;
  m_impl->renderBridge = new GUBIRenderBridge;
  m_impl->appListener = appListener;

  gubi::CreateDevice(gubi::DeviceFlags_Default,
                     m_impl->time,
                     m_impl->renderBridge,
                     this,
                     &m_impl->gubiDevice);

  m_impl->window->setListener(this);

  // make a test widget with an image and a view
  gubi::IView *view = NULL;
  if (m_impl->gubiDevice->CreateView(L"testView", &view)) {
    gubi::IImage *image = NULL;
    if (m_impl->renderBridge->createImage("resources/", "bar_plus.png", &image)) {
      gubi::SmartPointer<gubi::PropertyBag> propBag(new gubi::PropertyBag);
      gubi::InsertProperty(propBag, L"image", image);

      if (m_impl->gubiDevice->CreateWidget(L"gubi.ui.image", propBag, &g_Widget)) {
        view->AddWidget(g_Widget, gubi::ViewSpace_Overlay);
      }
    }
  }

  gubi::SmartPointer<gubi::PropertyBag> bag(new gubi::PropertyBag);
  gubi::IEffect *effect = NULL;

  gubi::InsertProperty(bag, L"startValue", fmath::tofixed(10));
  gubi::InsertProperty(bag, L"endValue", fmath::tofixed(100));
  gubi::InsertProperty(bag, L"duration", 3000);
  gubi::InsertProperty(bag, L"period", 500);
  gubi::InsertProperty(bag, L"amplitude", fmath::tofixed(100));

  if (m_impl->gubiDevice->CreateEffect(L"wave", bag, gubi::EffectStartOption_Stopped, &effect)) {
    gubi::SignalConnect(effect, L"onUpdate", this, &GUBIControl::onEffectUpdate, NULL);
    effect->Start();
  }
}
// this function just grabs the first pointcloud2 it sees.
int readBag(std::string filename, sensor_msgs::PointCloud2 &cloud){
  rosbag::Bag bag(filename);

  // check for PointCloud2s in the bag:
  rosbag::TypeQuery query("sensor_msgs/PointCloud2");
  rosbag::View view(bag, query, ros::TIME_MIN, ros::TIME_MAX);

  if(view.size() > 0){
    ROS_INFO("Found %d PointCloud2 messages. Reading...", (int)view.size());
    boost::shared_ptr<sensor_msgs::PointCloud2> scan;
    BOOST_FOREACH(rosbag::MessageInstance m, view){
      if((scan = m.instantiate<sensor_msgs::PointCloud2>())){
        cloud = *scan;
        ROS_INFO("%d by %d points in cloud", cloud.width, cloud.height);
        return 0;  // stop after the first cloud
      }
    }
    ROS_ERROR("Error reading scans!");
    return -2;
  }
  ROS_ERROR("No PointCloud2 messages found in %s", filename.c_str());
  return -1;
}
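// Hypothetical companion sketch: write a single cloud into a bag so readBag()
// above has something to read. The topic name "/points" is arbitrary, since
// readBag() selects by message type, not by topic. Assumes ros::Time has been
// initialised (e.g. via ros::init or ros::Time::init()).
#include <ros/ros.h>
#include <rosbag/bag.h>
#include <sensor_msgs/PointCloud2.h>

void writeSingleCloud(const std::string& filename, const sensor_msgs::PointCloud2& cloud)
{
  rosbag::Bag bag(filename, rosbag::bagmode::Write);
  bag.write("/points", ros::Time::now(), cloud);
  bag.close();
}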
//-------------------------------------------------------------------------------
bool GraspAffordances::loadIcr(grasp_affordances::LoadIcr::Request &req,
                               grasp_affordances::LoadIcr::Response &res)
{
  res.success = false;
  lock_.lock();

  ROS_INFO("Loading ICR from: %s", (icr_dbase_dir_ + req.file + ".bag").c_str());

  // load the icr
  rosbag::Bag bag(icr_dbase_dir_ + req.file + ".bag");
  rosbag::View view(bag, rosbag::TopicQuery("contact_regions"));
  icr_msgs::ContactRegions::Ptr icr(new icr_msgs::ContactRegions);

  // It's actually assumed that the bag only contains one message
  BOOST_FOREACH(rosbag::MessageInstance const m, view)
  {
    icr = m.instantiate<icr_msgs::ContactRegions>();
    if (icr == NULL)  // check the message just instantiated (the original tested input_icr_ here)
    {
      ROS_ERROR("Could not load %s", (icr_dbase_dir_ + req.file + ".bag").c_str());
      lock_.unlock();
      return res.success;
    }
  }

  // The original snippet is truncated here; a minimal plausible tail:
  // store the loaded message wherever the class keeps it, then release and report success.
  bag.close();
  res.success = true;
  lock_.unlock();
  return res.success;
}
unsigned int bottom_up_computation_vertex_cover(G_t &G, T_t &T,
        treedec::app::detail::Intermediate_Results<T_t> &iRes,
        bool use_cache_weight_traversal=false)
{
    std::stack<typename boost::graph_traits<T_t>::vertex_descriptor> S;

    if(use_cache_weight_traversal){
        treedec::nice::min_weight_traversal(T, S);
    }
    else{
        treedec::nice::postorder_traversal(T, S);
    }

    typename boost::graph_traits<T_t>::vertex_descriptor cur;

    while(!S.empty()){
        cur = S.top();
        S.pop();

        treedec::nice::enum_node_type node_type = treedec::nice::get_type(cur, T);

        if(node_type == treedec::nice::LEAF){
            //Store both possibilities (the empty set and the set containing one vertex).
            iRes.add(cur, 0, 0);
            iRes.add(cur, 1, 1);
        }
        else if(node_type == treedec::nice::INTRODUCE){
            typename boost::graph_traits<T_t>::vertex_descriptor child =
                                 *(boost::adjacent_vertices(cur, T).first);
            typename boost::graph_traits<G_t>::vertex_descriptor new_vertex =
                                 treedec::nice::get_introduced_vertex(cur, T);

            //check if old VC is still valid, if new_vertex is added to the considered graph
            for(typename std::map<unsigned, int>::iterator it = iRes._results[child].begin();
                     it != iRes._results[child].end(); it++)
            {
                unsigned old_encoded = it->first;
                typename treedec_traits<T_t>::bag_type decoded_set; //TODO: get rid of this
                iRes.decode(child, old_encoded, decoded_set);

                unsigned new_encoded = iRes.encode(cur, decoded_set);

                auto &b = bag(cur, T);
                if(is_valid_extension(G, b, decoded_set, new_vertex)){
                    iRes.add(cur, new_encoded, iRes.get(child, old_encoded));
                }
                else{
                    iRes.add(cur, new_encoded, -1);
                }
            }

            //check for new set in new graph
            for(typename std::map<unsigned, int>::iterator it = iRes._results[child].begin();
                     it != iRes._results[child].end(); it++)
            {
                unsigned old_encoded = it->first;

                encoded_iterator<typename treedec_traits<T_t>::bag_type::iterator>
                        encIt(old_encoded, bag(child, T).begin(), bag(child, T).end());
                unsigned new_encoded = iRes.encode_more(cur, child, encIt, new_vertex);

                if(it->second != -1){
                    iRes.add(cur, new_encoded, it->second + 1);
                }
                else{
                    //TODO: is this really correct?!
                    /*
                    typename treedec_traits<T_t>::bag_type new_set;
                    iRes.decode(child, old_encoded, new_set);
                    new_set.insert(new_vertex);

                    if(is_vertex_cover2(G, bag(cur, T), new_set)){
                        iRes.add(cur, new_encoded, new_set.size());
                    }
                    else{
                    */
                        iRes.add(cur, new_encoded, -1);
                    // }
                }
            }
        }
        else if(node_type == treedec::nice::FORGET){
            typename boost::graph_traits<T_t>::vertex_descriptor child =
                                 *(boost::adjacent_vertices(cur, T).first);
            typename boost::graph_traits<G_t>::vertex_descriptor forgotten_vertex =
                                 treedec::nice::get_forgotten_vertex(cur, T);

            BOOST_AUTO(subset_iters_it,
                 make_subsets_range(bag(cur, T).begin(), bag(cur, T).end(), 0, bag(cur, T).size()).first);

            for(; subset_iters_it != bag(cur, T).end(); ++subset_iters_it){
                unsigned old_encoded1 = iRes.encode(child, (*subset_iters_it).first, (*subset_iters_it).second);
                unsigned old_encoded2 = iRes.update_encoding(child, old_encoded1, forgotten_vertex);

                int val_without = iRes.get(child, old_encoded1);
                int val_with = iRes.get(child, old_encoded2);

                unsigned new_encoded = iRes.encode(cur, (*subset_iters_it).first, (*subset_iters_it).second);

                if(val_with == -1){
                    iRes.add(cur, new_encoded, val_without);
                }
                else if(val_without == -1){
                    iRes.add(cur, new_encoded, val_with);
                }
                else{
                    if(val_with <= val_without){
                        iRes.add(cur, new_encoded, val_with);
                    }
                    else{
                        iRes.add(cur, new_encoded, val_without);
                    }
                }
            }
        }
        else if(node_type == treedec::nice::JOIN){
            typename boost::graph_traits<T_t>::vertex_descriptor child1 =
                                 *(boost::adjacent_vertices(cur, T).first);
            typename boost::graph_traits<T_t>::vertex_descriptor child2 =
                                 *(++boost::adjacent_vertices(cur, T).first);

            for(typename std::map<unsigned, int>::iterator it = iRes._results[child1].begin();
                     it != iRes._results[child1].end(); it++)
            {
                unsigned encoded = it->first;

                if(iRes.get(child1, encoded) < 0 || iRes.get(child2, encoded) < 0){
                    iRes.add(cur, encoded, -1);
                }
                else{
                    unsigned subset_size = iRes.get_size(child1, encoded);
                    iRes.add(cur, encoded,
                             iRes.get(child1, encoded) + iRes.get(child2, encoded) - subset_size);
                }
            }
        }
    }

    typename boost::graph_traits<T_t>::vertex_descriptor root = find_root(T);
    return (unsigned int) iRes.get(root, 0);
}
unsigned int bottom_up_computation_vertex_cover(G_t &G, T_t &T,
        std::vector<std::map<typename treedec_traits<T_t>::bag_type, int> > &results)
{
    std::stack<typename boost::graph_traits<T_t>::vertex_descriptor> S;
    treedec::nice::postorder_traversal(T, S);
    typename boost::graph_traits<T_t>::vertex_descriptor cur;

    while(!S.empty()){
        cur = S.top();
        S.pop();

        treedec::nice::enum_node_type node_type = treedec::nice::get_type(cur, T);

        if(node_type == treedec::nice::LEAF){
            results[cur][typename treedec_traits<T_t>::bag_type()] = 0;
            results[cur][bag(cur, T)] = 1;
        }
        else if(node_type == treedec::nice::INTRODUCE){
            typename boost::graph_traits<T_t>::vertex_descriptor child =
                                 *(boost::adjacent_vertices(cur, T).first);
            typename boost::graph_traits<G_t>::vertex_descriptor new_vertex =
                                 treedec::nice::get_introduced_vertex(cur, T);

            for(typename std::map<typename treedec_traits<T_t>::bag_type, int>::iterator it =
                     results[child].begin(); it != results[child].end(); it++)
            {
                if(is_vertex_cover(G, bag(cur, T), it->first)){
                    results[cur][it->first] = results[child][it->first];
                }
                else{
                    results[cur][it->first] = -1;
                }
            }

            for(typename std::map<typename treedec_traits<T_t>::bag_type, int>::iterator it =
                     results[child].begin(); it != results[child].end(); it++)
            {
                typename treedec_traits<T_t>::bag_type tmp = it->first;
                tmp.insert(new_vertex);

                if(it->second != -1){
                    results[cur][tmp] = results[child][it->first] + 1;
                }
                else{
                    if(is_vertex_cover(G, bag(cur, T), tmp)){
                        results[cur][tmp] = tmp.size();
                    }
                    else{
                        results[cur][tmp] = -1;
                    }
                }
            }
        }
        else if(node_type == treedec::nice::FORGET){
            typename boost::graph_traits<T_t>::vertex_descriptor child =
                                 *(boost::adjacent_vertices(cur, T).first);
            typename boost::graph_traits<G_t>::vertex_descriptor forgotten_vertex =
                                 treedec::nice::get_forgotten_vertex(cur, T);

            std::vector<typename treedec_traits<T_t>::bag_type> subs;
            treedec::powerset(bag(cur, T), subs);

            for(unsigned int i = 0; i < subs.size(); i++){
                typename treedec_traits<T_t>::bag_type tmp = subs[i];
                tmp.insert(forgotten_vertex);

                int val_with = results[child][subs[i]];
                int val_without = results[child][tmp];

                if(val_with == -1){
                    results[cur][subs[i]] = val_without;
                }
                else if(val_without == -1){
                    results[cur][subs[i]] = val_with;
                }
                else{
                    if(val_with <= val_without){
                        results[cur][subs[i]] = val_with;
                    }
                    else{
                        results[cur][subs[i]] = val_without;
                    }
                }
            }
        }
        else if(node_type == treedec::nice::JOIN){
            typename boost::graph_traits<T_t>::vertex_descriptor child1 =
                                 *(boost::adjacent_vertices(cur, T).first);
            typename boost::graph_traits<T_t>::vertex_descriptor child2 =
                                 *(++boost::adjacent_vertices(cur, T).first);

            std::vector<typename treedec_traits<T_t>::bag_type> subs;
            treedec::powerset(bag(cur, T), subs);

            for(unsigned int i = 0; i < subs.size(); i++){
                if(results[child1][subs[i]] < 0 || results[child2][subs[i]] < 0){
                    results[cur][subs[i]] = -1;
                }
                else{
                    results[cur][subs[i]] =
                        results[child1][subs[i]] + results[child2][subs[i]] - subs[i].size();
                }
            }
        }
    }

    typename boost::graph_traits<T_t>::vertex_descriptor root = find_root(T);
    return (unsigned int) results[root].begin()->second;
}
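// Both variants above combine JOIN-node children as c(s) = c1(s) + c2(s) - |s|
// and read the answer off the root's entry for the empty bag. A self-contained
// brute-force checker (independent of the treedec types; hypothetical edge-list
// representation over vertices 0..n-1) is handy for validating the DP on tiny graphs:
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// Exhaustive minimum vertex cover; only meant as a cross-check on small inputs.
unsigned brute_force_min_vertex_cover(unsigned n,
                                      const std::vector<std::pair<unsigned, unsigned> >& edges)
{
    unsigned best = n;  // the full vertex set is always a cover
    for (std::uint32_t mask = 0; mask < (1u << n); ++mask) {
        bool covers = true;
        for (std::size_t e = 0; e < edges.size(); ++e) {
            if (!((mask >> edges[e].first) & 1u) && !((mask >> edges[e].second) & 1u)) {
                covers = false;  // this edge has neither endpoint selected
                break;
            }
        }
        if (covers) {
            unsigned size = 0;
            for (std::uint32_t m = mask; m != 0; m >>= 1) size += (m & 1u);
            best = std::min(best, size);
        }
    }
    return best;
}
// Example: the path 0-1-2 (edges {0,1} and {1,2}) has minimum cover size 1,
// which the tree-decomposition DP should reproduce.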
int main(int argc, char **argv)
{
  ros::init(argc, argv, "msf_eval");
  ros::Time::init();

  enum argIndices{
    bagfile = 1,
    EVAL_topic = 2,
    GT_topic = 3,
    singleRunOnly = 4
  };

  //calibration of sensor to vicon
  Eigen::Matrix4d T_BaBg_mat;

  //this is the Vicon one - SLAM sensor V0
  ///////////////////////////////////////////////////////////////////////////////////////////
  T_BaBg_mat <<  0.999706627053000, -0.022330158354000,  0.005123243528000, -0.060614697387000,
                 0.022650462142000,  0.997389634278000, -0.068267398302000,  0.035557942651000,
                -0.003589706237000,  0.068397960288000,  0.997617159323000, -0.042589657349000,
                 0,                  0,                  0,                  1.000000000000000;
  ////////////////////////////////////////////////////////////////////////////////////////////

  ros::Duration dt(0.0039);
  const double timeSyncThreshold = 0.005;
  const double timeDiscretization = 10.0; //discretization step for different starting points into the dataset
  const double trajectoryTimeDiscretization = 0.049; //discretization of evaluation points (vision framerate for fair comparison)
  const double startTimeOffset = 10.0;

  if (argc < 4)
  {
    MSF_ERROR_STREAM("usage: ./"<<argv[0]<<" bagfile EVAL_topic GT_topic [singleRunOnly]");
    return -1;
  }
  bool singleRun = false;
  if (argc == 5)
  {
    singleRun = atoi(argv[singleRunOnly]);
  }

  if(singleRun)
  {
    MSF_WARN_STREAM("Doing only a single run.");
  }
  else
  {
    MSF_WARN_STREAM("Will process the dataset from different starting points.");
  }

  typedef geometry_msgs::TransformStamped GT_TYPE;
  typedef geometry_msgs::PoseWithCovarianceStamped EVAL_TYPE;

  // output file
  std::stringstream matlab_fname;
  std::string path = ros::package::getPath("msf_eval");
  matlab_fname << path << "/Matlab/matlab_data" << ros::Time::now().sec << ".m";

  std::ofstream outfile(matlab_fname.str().c_str());
  std::stringstream poses_EVAL;
  std::stringstream poses_GT;
  assert(outfile.good());
  outfile << "data=[" << std::endl;

  ros::Duration startOffset(startTimeOffset);

  sm::kinematics::Transformation T_BaBg(T_BaBg_mat); //body aslam to body Ground Truth

  // open for reading
  rosbag::Bag bag(argv[bagfile], rosbag::bagmode::Read);

  // views on topics
  rosbag::View view_EVAL(bag, rosbag::TopicQuery(argv[EVAL_topic]));
  rosbag::View view_GT(bag, rosbag::TopicQuery(argv[GT_topic]));

  //check topics
  if (view_EVAL.size() == 0)
  {
    MSF_ERROR_STREAM("The bag you provided does not contain messages for topic "<<argv[EVAL_topic]);
    return -1;
  }
  if (view_GT.size() == 0)
  {
    MSF_ERROR_STREAM("The bag you provided does not contain messages for topic "<<argv[GT_topic]);
    return -1;
  }

  //litter console with number of messages
  MSF_INFO_STREAM("Reading from "<<argv[bagfile]);
  MSF_INFO_STREAM("Topic "<<argv[EVAL_topic]<<", size: "<<view_EVAL.size());
  MSF_INFO_STREAM("Topic "<<argv[GT_topic]<<", size: "<<view_GT.size());

  //get times of first messages
  GT_TYPE::ConstPtr GT_begin = view_GT.begin()->instantiate<GT_TYPE>();
  assert(GT_begin);
  EVAL_TYPE::ConstPtr POSE_begin = view_EVAL.begin()->instantiate<EVAL_TYPE>();
  assert(POSE_begin);

  MSF_INFO_STREAM("First GT data at "<<GT_begin->header.stamp);
  MSF_INFO_STREAM("First EVAL data at "<<POSE_begin->header.stamp);

  while (true)  // start eval from different starting points
  {
    rosbag::View::const_iterator it_EVAL = view_EVAL.begin();
    rosbag::View::const_iterator it_GT = view_GT.begin();

    // read ground truth
    ros::Time start;

    // Find start time alignment: set the GT iterator to point to a time larger than the aslam time
    while (true)
    {
      GT_TYPE::ConstPtr trafo = it_GT->instantiate<GT_TYPE>();
      assert(trafo);
      ros::Time time_GT;
      time_GT = trafo->header.stamp + dt;

      EVAL_TYPE::ConstPtr pose = it_EVAL->instantiate<EVAL_TYPE>();
      assert(pose);
      ros::Time time_EKF;
      time_EKF = pose->header.stamp;

      if (time_EKF >= time_GT)
      {
        it_GT++;
        if (it_GT == view_GT.end())
        {
          MSF_ERROR_STREAM("Time synchronization failed");
          return false;
        }
      }
      else
      {
        start = time_GT;
        MSF_INFO_STREAM("Time synced! GT start: "<<start <<" EVAL start: "<<time_EKF);
        break;
      }
    }

    // world frame alignment
    sm::kinematics::Transformation T_WaBa;
    sm::kinematics::Transformation T_WgBg;
    sm::kinematics::Transformation T_WgBg_last;
    sm::kinematics::Transformation T_WaWg;

    // now find the GT/EKF pairings
    int ctr = 0; //how many meas did we add this run?
    double ds = 0.0; // distance travelled
    ros::Time lastTime(0.0);

    MSF_INFO_STREAM("Processing measurements... Current start point: "<<startOffset<<"s into the bag.");

    for (; it_GT != view_GT.end(); ++it_GT)
    {
      GT_TYPE::ConstPtr trafo = it_GT->instantiate<GT_TYPE>();
      assert(trafo);

      // find closest timestamp
      ros::Time time_GT = trafo->header.stamp + dt;

      EVAL_TYPE::ConstPtr pose = it_EVAL->instantiate<EVAL_TYPE>();
      assert(pose);

      ros::Time time_EKF = pose->header.stamp;

      bool terminate = false;
      //get the measurement close to this GT value
      while (time_GT > time_EKF)
      {
        it_EVAL++;
        if (it_EVAL == view_EVAL.end())
        {
          terminate = true;
          MSF_INFO_STREAM("done. All EKF meas processed!");
          break;
        }
        pose = it_EVAL->instantiate<EVAL_TYPE>();
        assert(pose);
        time_EKF = pose->header.stamp;
      }
      if (terminate){
        break;
      }

      // add comparison value
      if (time_GT - start >= startOffset)
      {
        T_WaBa = sm::kinematics::Transformation(
            Eigen::Vector4d(-pose->pose.pose.orientation.x, -pose->pose.pose.orientation.y,
                            -pose->pose.pose.orientation.z, pose->pose.pose.orientation.w),
            Eigen::Vector3d(pose->pose.pose.position.x, pose->pose.pose.position.y,
                            pose->pose.pose.position.z));

        T_WgBg = sm::kinematics::Transformation(
            Eigen::Vector4d(-trafo->transform.rotation.x, -trafo->transform.rotation.y,
                            -trafo->transform.rotation.z, trafo->transform.rotation.w),
            Eigen::Vector3d(trafo->transform.translation.x, trafo->transform.translation.y,
                            trafo->transform.translation.z));

        // initial alignment
        if (ctr == 0)
        {
          T_WaWg = (T_WaBa) * T_BaBg * T_WgBg.inverse();
          T_WgBg_last = T_WgBg;
        }

        sm::kinematics::Transformation dT = T_WaBa * (T_WaWg * T_WgBg * T_BaBg.inverse()).inverse();
        sm::kinematics::Transformation T_WaBa_gt = (T_WaWg * T_WgBg * T_BaBg.inverse());

        T_WaBa_gt = sm::kinematics::Transformation(T_WaBa_gt.q().normalized(), T_WaBa_gt.t());
        dT = sm::kinematics::Transformation(dT.q().normalized(), dT.t());

        // update integral
        if (trafo)
        {
          ds += (T_WgBg.t() - T_WgBg_last.t()).norm();
          // store last GT transformation
          T_WgBg_last = T_WgBg;
        }
        else
        {
          // too noisy
          if ((T_WgBg * T_WgBg_last.inverse()).t().norm() > .1)
          {
            ds += (T_WgBg.t() - T_WgBg_last.t()).norm();
            //MSF_INFO_STREAM((T_WgBg*T_WgBg_last.inverse()).t().norm());
            // store last GT transformation
            T_WgBg_last = T_WgBg;
          }
        }

        Eigen::Vector3d dalpha;
        if (dT.q().head<3>().norm() > 1e-12)
        {
          dalpha = (asin(dT.q().head<3>().norm()) * 2 * dT.q().head<3>().normalized());
        }
        else
        {
          dalpha = 2 * dT.q().head<3>();
        }

        // g-vector alignment
        Eigen::Vector3d e_z_Wg(0, 0, 1);
        Eigen::Vector3d e_z_Wa = dT.C() * e_z_Wg;
        double dalpha_e_z = acos(std::min(1.0, e_z_Wg.dot(e_z_Wa)));

        if (fabs((time_GT - time_EKF).toSec()) < timeSyncThreshold
            && (time_EKF - lastTime).toSec() > trajectoryTimeDiscretization)
        {
          if (startOffset.toSec() == startTimeOffset)
          {
            poses_EVAL << T_WaBa.t().transpose() << ";" << std::endl;
            poses_GT << T_WgBg.t().transpose() << ";" << std::endl;
          }
          Eigen::Matrix<double, 6, 6> cov = getCovariance(pose);

          outfile << (time_GT - start).toSec() << " " << ds << " "
                  << (T_WaBa.t() - T_WaBa_gt.t()).norm() << " "                           //translation
                  << 2 * acos(std::min(1.0, fabs(dT.q()[3]))) << " " << dalpha_e_z << " " //orientation
                  << cov(0, 0) << " " << cov(1, 1) << " " << cov(2, 2) << " "             //position covariances
                  << cov(3, 3) << " " << cov(4, 4) << " " << cov(5, 5) << ";"             //orientation covariances
                  << std::endl;

          lastTime = time_EKF; // remember
        }

        // count comparisons
        ctr++;
      }
    }

    MSF_INFO_STREAM("Added "<<ctr<<" measurement edges.");

    //where in the bag should the next eval start
    startOffset += ros::Duration(timeDiscretization);

    if (ctr == 0 || singleRun) //any new measurements this run?
    {
      break;
    }
  }

  // cleanup
  bag.close();

  outfile << "];";
  outfile << "poses = [" << poses_EVAL.str() << "];" << std::endl;
  outfile << "poses_GT = [" << poses_GT.str() << "];" << std::endl;
  outfile.close();

  return 0;
}
// this is just a workbench. most of the stuff here will go into the Frontend class.
int main(int argc, char **argv)
{
  ros::init(argc, argv, "okvis_node_synchronous");

  google::InitGoogleLogging(argv[0]);
  FLAGS_stderrthreshold = 0;  // INFO: 0, WARNING: 1, ERROR: 2, FATAL: 3
  FLAGS_colorlogtostderr = 1;

  if (argc != 3 && argc != 4) {
    LOG(ERROR) << "Usage: ./" << argv[0]
               << " configuration-yaml-file bag-to-read-from [skip-first-seconds]";
    return -1;
  }

  okvis::Duration deltaT(0.0);
  if (argc == 4) {
    deltaT = okvis::Duration(atof(argv[3]));
  }

  // set up the node
  ros::NodeHandle nh("okvis_node");

  // publisher
  okvis::Publisher publisher(nh);

  // read configuration file
  std::string configFilename(argv[1]);
  okvis::RosParametersReader vio_parameters_reader(configFilename);
  okvis::VioParameters parameters;
  vio_parameters_reader.getParameters(parameters);

  okvis::ThreadedKFVio okvis_estimator(parameters);

  okvis_estimator.setFullStateCallback(
      std::bind(&okvis::Publisher::publishFullStateAsCallback, &publisher,
                std::placeholders::_1, std::placeholders::_2,
                std::placeholders::_3, std::placeholders::_4));
  okvis_estimator.setLandmarksCallback(
      std::bind(&okvis::Publisher::publishLandmarksAsCallback, &publisher,
                std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
  okvis_estimator.setStateCallback(
      std::bind(&okvis::Publisher::publishStateAsCallback, &publisher,
                std::placeholders::_1, std::placeholders::_2));
  okvis_estimator.setBlocking(true);
  publisher.setParameters(parameters);  // pass the specified publishing stuff

  // extract the folder path
  std::string bagname(argv[2]);
  size_t pos = bagname.find_last_of("/");
  std::string path;
  if (pos == std::string::npos)
    path = ".";
  else
    path = bagname.substr(0, pos);

  const unsigned int numCameras = parameters.nCameraSystem.numCameras();

  // setup files to be written
  publisher.setCsvFile(path + "/okvis_estimator_output.csv");
  publisher.setLandmarksCsvFile(path + "/okvis_estimator_landmarks.csv");
  okvis_estimator.setImuCsvFile(path + "/imu0_data.csv");
  for (size_t i = 0; i < numCameras; ++i) {
    std::stringstream num;
    num << i;
    okvis_estimator.setTracksCsvFile(i, path + "/cam" + num.str() + "_tracks.csv");
  }

  // open the bag
  rosbag::Bag bag(argv[2], rosbag::bagmode::Read);

  // views on topics. the slash needs to be correct, it's ridiculous...
  std::string imu_topic("/imu0");
  rosbag::View view_imu(bag, rosbag::TopicQuery(imu_topic));
  if (view_imu.size() == 0) {
    LOG(ERROR) << "no imu topic";
    return -1;
  }
  rosbag::View::iterator view_imu_iterator = view_imu.begin();
  LOG(INFO) << "No. IMU messages: " << view_imu.size();

  std::vector<std::shared_ptr<rosbag::View> > view_cams_ptr;
  std::vector<rosbag::View::iterator> view_cam_iterators;
  std::vector<okvis::Time> times;
  okvis::Time latest(0);
  for (size_t i = 0; i < numCameras; ++i) {
    std::string camera_topic("/cam" + std::to_string(i) + "/image_raw");
    std::shared_ptr<rosbag::View> view_ptr(
        new rosbag::View(bag, rosbag::TopicQuery(camera_topic)));
    if (view_ptr->size() == 0) {
      LOG(ERROR) << "no camera topic";
      return 1;
    }
    view_cams_ptr.push_back(view_ptr);
    view_cam_iterators.push_back(view_ptr->begin());
    sensor_msgs::ImageConstPtr msg1 = view_cam_iterators[i]->instantiate<sensor_msgs::Image>();
    times.push_back(okvis::Time(msg1->header.stamp.sec, msg1->header.stamp.nsec));
    if (times.back() > latest)
      latest = times.back();
    LOG(INFO) << "No. cam " << i << " messages: " << view_cams_ptr.back()->size();
  }

  for (size_t i = 0; i < numCameras; ++i) {
    if ((latest - times[i]).toSec() > 0.01)
      view_cam_iterators[i]++;
  }

  int counter = 0;
  okvis::Time start(0.0);
  while (ros::ok()) {
    ros::spinOnce();
    okvis_estimator.display();

    // check if at the end
    if (view_imu_iterator == view_imu.end()) {
      std::cout << std::endl << "Finished. Press any key to exit." << std::endl << std::flush;
      char k = 0;
      while (k == 0 && ros::ok()) {
        k = cv::waitKey(1);
        ros::spinOnce();
      }
      return 0;
    }
    for (size_t i = 0; i < numCameras; ++i) {
      if (view_cam_iterators[i] == view_cams_ptr[i]->end()) {
        std::cout << std::endl << "Finished. Press any key to exit." << std::endl << std::flush;
        char k = 0;
        while (k == 0 && ros::ok()) {
          k = cv::waitKey(1);
          ros::spinOnce();
        }
        return 0;
      }
    }

    // add images
    okvis::Time t;
    for (size_t i = 0; i < numCameras; ++i) {
      sensor_msgs::ImageConstPtr msg1 = view_cam_iterators[i]->instantiate<sensor_msgs::Image>();
      cv::Mat filtered(msg1->height, msg1->width, CV_8UC1);
      memcpy(filtered.data, &msg1->data[0], msg1->height * msg1->width);
      t = okvis::Time(msg1->header.stamp.sec, msg1->header.stamp.nsec);
      if (start == okvis::Time(0.0)) {
        start = t;
      }

      // get all IMU measurements till then
      okvis::Time t_imu = start;
      do {
        sensor_msgs::ImuConstPtr msg = view_imu_iterator->instantiate<sensor_msgs::Imu>();
        Eigen::Vector3d gyr(msg->angular_velocity.x, msg->angular_velocity.y,
                            msg->angular_velocity.z);
        Eigen::Vector3d acc(msg->linear_acceleration.x, msg->linear_acceleration.y,
                            msg->linear_acceleration.z);

        t_imu = okvis::Time(msg->header.stamp.sec, msg->header.stamp.nsec);

        // add the IMU measurement for (blocking) processing
        if (t_imu - start > deltaT)
          okvis_estimator.addImuMeasurement(t_imu, acc, gyr);

        view_imu_iterator++;
      } while (view_imu_iterator != view_imu.end() && t_imu <= t);

      // add the image to the frontend for (blocking) processing
      if (t - start > deltaT)
        okvis_estimator.addImage(t, i, filtered);

      view_cam_iterators[i]++;
    }
    ++counter;

    // display progress
    if (counter % 20 == 0) {
      std::cout << "\rProgress: "
                << int(double(counter) / double(view_cams_ptr.back()->size()) * 100) << "% ";
    }
  }

  std::cout << std::endl;
  return 0;
}
nsresult
HttpBaseChannel::SetupReplacementChannel(nsIURI *newURI,
                                         nsIChannel *newChannel,
                                         bool preserveMethod,
                                         bool forProxy)
{
  LOG(("HttpBaseChannel::SetupReplacementChannel "
       "[this=%p newChannel=%p preserveMethod=%d forProxy=%d]",
       this, newChannel, preserveMethod, forProxy));

  PRUint32 newLoadFlags = mLoadFlags | LOAD_REPLACE;
  // if the original channel was using SSL and this channel is not using
  // SSL, then no need to inhibit persistent caching. however, if the
  // original channel was not using SSL and has INHIBIT_PERSISTENT_CACHING
  // set, then allow the flag to apply to the redirected channel as well.
  // since we force set INHIBIT_PERSISTENT_CACHING on all HTTPS channels,
  // we only need to check if the original channel was using SSL.
  if (mConnectionInfo->UsingSSL())
    newLoadFlags &= ~INHIBIT_PERSISTENT_CACHING;

  // Do not pass along LOAD_CHECK_OFFLINE_CACHE
  newLoadFlags &= ~nsICachingChannel::LOAD_CHECK_OFFLINE_CACHE;

  newChannel->SetLoadGroup(mLoadGroup);
  newChannel->SetNotificationCallbacks(mCallbacks);
  newChannel->SetLoadFlags(newLoadFlags);

  nsCOMPtr<nsIHttpChannel> httpChannel = do_QueryInterface(newChannel);
  if (!httpChannel)
    return NS_OK; // no other options to set

  if (preserveMethod) {
    nsCOMPtr<nsIUploadChannel> uploadChannel = do_QueryInterface(httpChannel);
    nsCOMPtr<nsIUploadChannel2> uploadChannel2 = do_QueryInterface(httpChannel);
    if (mUploadStream && (uploadChannel2 || uploadChannel)) {
      // rewind upload stream
      nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mUploadStream);
      if (seekable)
        seekable->Seek(nsISeekableStream::NS_SEEK_SET, 0);

      // replicate original call to SetUploadStream...
      if (uploadChannel2) {
        const char *ctype = mRequestHead.PeekHeader(nsHttp::Content_Type);
        if (!ctype)
          ctype = "";
        const char *clen = mRequestHead.PeekHeader(nsHttp::Content_Length);
        PRInt64 len = clen ? nsCRT::atoll(clen) : -1;
        uploadChannel2->ExplicitSetUploadStream(
            mUploadStream, nsDependentCString(ctype), len,
            nsDependentCString(mRequestHead.Method()),
            mUploadStreamHasHeaders);
      } else {
        if (mUploadStreamHasHeaders) {
          uploadChannel->SetUploadStream(mUploadStream, EmptyCString(), -1);
        } else {
          const char *ctype = mRequestHead.PeekHeader(nsHttp::Content_Type);
          const char *clen = mRequestHead.PeekHeader(nsHttp::Content_Length);
          if (!ctype) {
            ctype = "application/octet-stream";
          }
          if (clen) {
            uploadChannel->SetUploadStream(mUploadStream,
                                           nsDependentCString(ctype),
                                           atoi(clen));
          }
        }
      }
    }
    // since preserveMethod is true, we need to ensure that the appropriate
    // request method gets set on the channel, regardless of whether or not
    // we set the upload stream above. This means SetRequestMethod() will
    // be called twice if ExplicitSetUploadStream() gets called above.
    httpChannel->SetRequestMethod(nsDependentCString(mRequestHead.Method()));
  }

  // convey the referrer if one was used for this channel to the next one
  if (mReferrer)
    httpChannel->SetReferrer(mReferrer);
  // convey the mAllowPipelining flag
  httpChannel->SetAllowPipelining(mAllowPipelining);
  // convey the new redirection limit
  httpChannel->SetRedirectionLimit(mRedirectionLimit - 1);

  nsCOMPtr<nsIHttpChannelInternal> httpInternal = do_QueryInterface(newChannel);
  if (httpInternal) {
    // convey the mForceAllowThirdPartyCookie flag
    httpInternal->SetForceAllowThirdPartyCookie(mForceAllowThirdPartyCookie);
    // convey the spdy flag
    httpInternal->SetAllowSpdy(mAllowSpdy);

    // update the DocumentURI indicator since we are being redirected.
    // if this was a top-level document channel, then the new channel
    // should have its mDocumentURI point to newURI; otherwise, we
    // just need to pass along our mDocumentURI to the new channel.
    if (newURI && (mURI == mDocumentURI))
      httpInternal->SetDocumentURI(newURI);
    else
      httpInternal->SetDocumentURI(mDocumentURI);

    // if there is a chain of keys for redirect-responses we transfer it to
    // the new channel (see bug #561276)
    if (mRedirectedCachekeys) {
      LOG(("HttpBaseChannel::SetupReplacementChannel "
           "[this=%p] transferring chain of redirect cache-keys", this));
      httpInternal->SetCacheKeysRedirectChain(mRedirectedCachekeys.forget());
    }
  }

  // transfer application cache information
  nsCOMPtr<nsIApplicationCacheChannel> appCacheChannel = do_QueryInterface(newChannel);
  if (appCacheChannel) {
    appCacheChannel->SetApplicationCache(mApplicationCache);
    appCacheChannel->SetInheritApplicationCache(mInheritApplicationCache);
    // We purposely avoid transferring mChooseApplicationCache.
  }

  // transfer any properties
  nsCOMPtr<nsIWritablePropertyBag> bag(do_QueryInterface(newChannel));
  if (bag)
    mPropertyHash.EnumerateRead(CopyProperties, bag.get());

  // transfer timed channel enabled status
  nsCOMPtr<nsITimedChannel> timed(do_QueryInterface(newChannel));
  if (timed)
    timed->SetTimingEnabled(mTimingEnabled);

  if (forProxy) {
    // Transfer all the headers from the previous channel.
    // This is needed for any headers that are not covered by the code above
    // or have been set separately, e.g. manually setting Referer without
    // setting up mReferrer.
    PRUint32 count = mRequestHead.Headers().Count();
    for (PRUint32 i = 0; i < count; ++i) {
      nsHttpAtom header;
      const char *value = mRequestHead.Headers().PeekHeaderAt(i, header);

      httpChannel->SetRequestHeader(nsDependentCString(header),
                                    nsDependentCString(value),
                                    false);
    }
  }

  return NS_OK;
}
#include <iostream>
using namespace std;

int main() {
  Bag bag(10);
  int n;

  for (int i = 0; i < 7; i++)
    bag += i;
  cout << "bag:\n" << bag << endl;

  Bag bagI = bag;
  cout << "bagI:\n" << bagI << endl;
  bag += 9;
  cout << "bagI:\n" << bagI << endl;
  bagI += 2;
  cout << "bag:\n" << bag << endl;

  Bag bagG(10);
  for (int i = 0; i < 6; i++)
    bagG += i;
  cout << "bagG:\n" << bagG << endl;
  bagI = bagG;
  cout << "bagI:\n" << bagI << endl;
  bagG += 14;
  cout << "bagI:\n" << bagI << endl;

  Bag bagK(10);
  for (int i = 19; i < 25; i++)
    bagK += i;
  cout << "bagG:\n" << bagG << endl;
  cout << "bagK:\n" << bagK << endl;

  bag = bagK + bagG;
  bag += 3;
  bag += 19;
  cout << "bag:\n" << bag << endl;
  cout << "bagI + 7 :\n" << bagI + 7 << endl;
  cout << "bagI + bagG:\n" << bagI + bagG << endl;

  if (bag.isEmpty())
    cout << "Bag is empty\n";
  else
    cout << "Bag is not empty\n";

  cout << "Enter an int to look up in the bag: ";
  cin >> n;
  if (bag.contains(n))
    cout << "Bag contains " << n << endl;
  else
    cout << "Bag does not contain " << n << endl;

  cout << "Enter an int to look up its occurrence in the Bag: ";
  cin >> n;
  if (bag.find(n) > 0)
    cout << "The int " << n << " occurs " << bag.find(n) << " times in the bag" << endl;
  else
    cout << "Bag does not contain " << n << endl;

  bag.clear();
  cout << "bag:\n" << bag << endl;
  bag = bagI;
  cout << "bag:\n" << bag << endl;

  return 0;
}
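// The driver above implies roughly the following Bag interface. This is a
// hypothetical minimal sketch reconstructed from the calls the driver makes;
// the original class (fixed capacity handling, overflow behaviour, printing
// format) may differ.
#include <algorithm>
#include <iostream>
#include <vector>

class Bag {
public:
    explicit Bag(int capacity) : capacity_(capacity) {}   // fixed-capacity bag of ints

    Bag& operator+=(int value) {
        if ((int)items_.size() < capacity_) items_.push_back(value);  // silently drop on overflow
        return *this;
    }
    Bag operator+(int value) const { Bag r(*this); r += value; return r; }
    Bag operator+(const Bag& other) const {               // multiset union of two bags
        Bag r(capacity_ + other.capacity_);
        r.items_ = items_;
        r.items_.insert(r.items_.end(), other.items_.begin(), other.items_.end());
        return r;
    }

    bool isEmpty() const { return items_.empty(); }
    bool contains(int value) const { return find(value) > 0; }
    int find(int value) const {                           // number of occurrences of value
        return (int)std::count(items_.begin(), items_.end(), value);
    }
    void clear() { items_.clear(); }

    friend std::ostream& operator<<(std::ostream& os, const Bag& b) {
        for (int v : b.items_) os << v << ' ';
        return os;
    }

private:
    int capacity_;
    std::vector<int> items_;                              // copies are deep via std::vector
};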
// assuming V and G are template parameters in the enclosing header
template<class V, class G>
size_t bag_size(V const& v, G const& g)
{
    return bag(g, v).size();
}