int main(int argc, char **argv) { ros::init(argc, argv, "uvc_cam"); ros::NodeHandle n; ros::NodeHandle n_private("~"); std::string device; std::string out_topic; n_private.param<std::string>("device", device, "/dev/video0"); n_private.param<std::string>("topic", out_topic, "/camera/image"); int width, height, fps, modetect_lum, modetect_count; n_private.param("width", width, 640); n_private.param("height", height, 480); n_private.param("fps", fps, 30); n_private.param("motion_threshold_luminance", modetect_lum, 100); n_private.param("motion_threshold_count", modetect_count, -1); ros::Publisher pub = n.advertise<sensor_msgs::Image>(out_topic.c_str(), 1); ROS_INFO("opening uvc_cam at %dx%d, %d fps", width, height, fps); uvc_cam::Cam cam(device.c_str(), uvc_cam::Cam::MODE_RGB, width, height, fps); cam.set_motion_thresholds(modetect_lum, modetect_count); ros::Time t_prev(ros::Time::now()); int count = 0, skip_count = 0; while (n.ok()) { unsigned char *frame = NULL; uint32_t bytes_used; int buf_idx = cam.grab(&frame, bytes_used); if (count++ % fps == 0) { ros::Time t(ros::Time::now()); ros::Duration d(t - t_prev); ROS_INFO("%.1f fps skip %d", (double)fps / d.toSec(), skip_count); t_prev = t; } if (frame) { sensor_msgs::Image image; image.header.stamp = ros::Time::now(); image.encoding = sensor_msgs::image_encodings::RGB8; image.height = height; image.width = width; image.step = 3 * width; image.set_data_size( image.step * image.height ); /* uint8_t* bgr = &(image.data[0]); for (uint32_t y = 0; y < height; y++) for (uint32_t x = 0; x < width; x++) { // hack... flip bgr to rgb uint8_t *p = frame + y * width * 3 + x * 3; uint8_t *q = bgr + y * width * 3 + x * 3; q[0] = p[2]; q[1] = p[1]; q[2] = p[0]; } */ memcpy(&image.data[0], frame, width * height * 3); pub.publish(image); cam.release(buf_idx); } else skip_count++; } return 0; }
/* ------------------------------------------------------------------------- */
// "Method 2" reconstruction slot: incremental structure-from-motion over the
// image sequence in `filelist`.
//
// Pipeline, as implemented below:
//   1. Run SIFT extraction for the whole sequence (on_PB_Sift_clicked) and
//      bootstrap the first two camera poses (reconstruct_first_two_view),
//      which fills Pmats[0] and Pmats[1].
//   2. Pose-chaining pass: for each subsequent image, match descriptors
//      against the previous image, recover the relative pose (P_0, P_1) with
//      FindCameraMatrices, and compose it with the previous absolute pose to
//      obtain Pmats[index_now]. Stops at the first image whose pose cannot
//      be recovered.
//   3. Visualization pass: re-match every adjacent pair, triangulate the
//      matched points against the estimated absolute poses, keep only points
//      with reprojection error <= 3 px, and accumulate them in outCloud_all.
//   4. Push the colored cloud and cameras to the GL widget.
//
// Relies on many members of MainWindow (K, Kinv, distcoeff, Pmats, imgpts,
// descriptors1/2, outCloud*, ymlFileDir, filelist, ...) whose definitions are
// outside this view.
void MainWindow::on_method2_clicked()
{
    int index_prev;
    int index_now;
    QString ymlFile;
    vector<KeyPoint> imgpts1_tmp;
    vector<KeyPoint> imgpts2_tmp;

    imgpts.resize(filelist.size());

    // Extract SIFT features for every image (writes per-image .yml files
    // that are re-read below).
    on_PB_Sift_clicked();

    cout << endl << endl << endl << "Using Method 2:" <<endl;

    vector<DMatch> matches;
    cv::Matx34d P_0;
    cv::Matx34d P_1;
    P_0 = cv::Matx34d(1,0,0,0, 0,1,0,0, 0,0,1,0);  // identity pose [I|0]

    // Working matrices for composing relative poses into absolute ones.
    cv::Mat_<double> t_prev = (cv::Mat_<double>(3,1) << 0, 0, 0);
    cv::Mat_<double> R_prev = (cv::Mat_<double>(3,3) << 0, 0, 0, 0, 0, 0, 0, 0, 0);
    cv::Mat_<double> R_prev_inv = (cv::Mat_<double>(3,3) << 0, 0, 0, 0, 0, 0, 0, 0, 0);
    cv::Mat_<double> t_now = (cv::Mat_<double>(3,1) << 0, 0, 0);
    cv::Mat_<double> R_now = (cv::Mat_<double>(3,3) << 0, 0, 0, 0, 0, 0, 0, 0, 0);
    cv::Mat_<double> t_new = (cv::Mat_<double>(3,1) << 0, 0, 0);
    cv::Mat_<double> R_new = (cv::Mat_<double>(3,3) << 0, 0, 0, 0, 0, 0, 0, 0, 0);

    // Bootstrap: establishes Pmats[0] and Pmats[1] from the first image pair.
    reconstruct_first_two_view();
    std::cout << "Pmat[0] = " << endl << Pmats[0]<<endl;
    std::cout << "Pmat[1] = " << endl << Pmats[1]<<endl;

    // ---- Pass 1: chain relative poses into absolute poses -----------------
    for(index_now = 2; index_now<filelist.size(); index_now++)
    {
        cout << endl << endl << endl <<endl;
        cout << "dealing with " << filelist.at(index_now).fileName().toStdString() << endl;
        cout << endl;

        index_prev = index_now - 1;

        // Double-buffer the descriptors: the previous iteration's
        // descriptors2 become this iteration's descriptors1, and the new
        // image's descriptors are restored from its .yml file.
        descriptors1.release();
        descriptors1 = descriptors2;
        descriptors2.release();
        ymlFile = ymlFileDir;
        ymlFile.append(filelist.at(index_now).fileName()).append(".yml");
        restore_descriptors_from_file(ymlFile.toStdString(),imgpts[index_now],descriptors2);

        matches.clear();
        //matching
        matching_fb_matcher(descriptors1,descriptors2,matches);
        matching_good_matching_filter(matches);

        imggoodpts1.clear();
        imggoodpts2.clear();
        // Reset to identity: FindCameraMatrices returns the RELATIVE pose of
        // the current view w.r.t. the previous one in P_1.
        P_0 = cv::Matx34d(1,0,0,0, 0,1,0,0, 0,0,1,0);
        P_1 = cv::Matx34d(1,0,0,0, 0,1,0,0, 0,0,1,0);
        outCloud.clear();
        if(FindCameraMatrices(K,Kinv,distcoeff,
                              imgpts[index_prev],imgpts[index_now],
                              imggoodpts1,imggoodpts2,
                              P_0,P_1,
                              matches,
                              outCloud))
        {//if can find camera matries
            // Unpack the previous ABSOLUTE pose [R_prev|t_prev] ...
            R_prev(0,0) = Pmats[index_prev](0,0);
            R_prev(0,1) = Pmats[index_prev](0,1);
            R_prev(0,2) = Pmats[index_prev](0,2);
            R_prev(1,0) = Pmats[index_prev](1,0);
            R_prev(1,1) = Pmats[index_prev](1,1);
            R_prev(1,2) = Pmats[index_prev](1,2);
            R_prev(2,0) = Pmats[index_prev](2,0);
            R_prev(2,1) = Pmats[index_prev](2,1);
            R_prev(2,2) = Pmats[index_prev](2,2);
            t_prev(0) = Pmats[index_prev](0,3);
            t_prev(1) = Pmats[index_prev](1,3);
            t_prev(2) = Pmats[index_prev](2,3);

            // ... and the freshly estimated RELATIVE pose [R_now|t_now].
            R_now(0,0) = P_1(0,0);
            R_now(0,1) = P_1(0,1);
            R_now(0,2) = P_1(0,2);
            R_now(1,0) = P_1(1,0);
            R_now(1,1) = P_1(1,1);
            R_now(1,2) = P_1(1,2);
            R_now(2,0) = P_1(2,0);
            R_now(2,1) = P_1(2,1);
            R_now(2,2) = P_1(2,2);
            t_now(0) = P_1(0,3);
            t_now(1) = P_1(1,3);
            t_now(2) = P_1(2,3);

            // NOTE(review): R_prev_inv is computed here but never read
            // afterwards -- this inversion appears to be dead code; confirm
            // before relying on it.
            invert(R_prev, R_prev_inv);

            // Compose relative onto absolute:
            //   t_new = t_prev + R_prev * t_now,  R_new = R_now * R_prev.
            t_new = t_prev + R_prev * t_now ;
            R_new = R_now * R_prev;
            //
            //store estimated pose
            Pmats[index_now] = cv::Matx34d (R_new(0,0),R_new(0,1),R_new(0,2),t_new(0),
                                            R_new(1,0),R_new(1,1),R_new(1,2),t_new(1),
                                            R_new(2,0),R_new(2,1),R_new(2,2),t_new(2));
            cout << "Pmats[index_now]:" << endl << Pmats[index_now] << endl;
        }
        else
        {
            // Pose recovery failed: abandon the rest of the sequence.
            break;
        }
    }

    cout << endl;
    cout << endl;
    cout << endl;

    //visualization
    // ---- Pass 2: re-match each adjacent pair and triangulate --------------
    imgpts.clear();
    imgpts.resize(filelist.size());
    for( index_now = 1; index_now<filelist.size(); index_now++)
    {
        index_prev = index_now - 1;

        // Unlike pass 1, both images' keypoints/descriptors are reloaded from
        // their .yml files every iteration (no double-buffering).
        descriptors1.release();
        descriptors2.release();
        ymlFile = ymlFileDir;
        ymlFile.append(filelist.at(index_prev).fileName()).append(".yml");
        cout << ymlFile.toStdString()<< endl;
        restore_descriptors_from_file(ymlFile.toStdString(),imgpts[index_prev],descriptors1);
        ymlFile = ymlFileDir;
        ymlFile.append(filelist.at(index_now).fileName()).append(".yml");
        cout << ymlFile.toStdString()<< endl;
        restore_descriptors_from_file(ymlFile.toStdString(),imgpts[index_now],descriptors2);

        matches.clear();
        //matching
        matching_fb_matcher(descriptors1,descriptors2,matches);
        matching_good_matching_filter(matches);

        imgpts1_tmp.clear();
        imgpts2_tmp.clear();
        GetAlignedPointsFromMatch(imgpts[index_prev],
                                  imgpts[index_now],
                                  matches,
                                  imgpts1_tmp,
                                  imgpts2_tmp);
        // NOTE(review): imgpts1_tmp.size() is printed twice; the second line
        // was presumably meant to print imgpts2_tmp.size() -- verify intent.
        cout << imgpts1_tmp.size() << endl;
        cout << imgpts1_tmp.size() << endl;

        outCloud.clear();
        std::vector<cv::KeyPoint> correspImg1Pt;
        // NOTE(review): mean_proj_err is never used after this call.
        double mean_proj_err = TriangulatePoints(imgpts1_tmp, imgpts2_tmp,
                                                 K, Kinv,distcoeff,
                                                 Pmats[index_prev], Pmats[index_now],
                                                 outCloud, correspImg1Pt);

        // Keep only well-triangulated points (reprojection error <= 3 px) and
        // record, per surviving point, which keypoint of the current image it
        // came from (-1 for all other images).
        std::vector<CloudPoint> outCloud_tmp;
        outCloud_tmp.clear();
        for (unsigned int i=0; i<outCloud.size(); i++)
        {
            if(outCloud[i].reprojection_error <= 3){
                //cout << "surving" << endl;
                outCloud[i].imgpt_for_img.resize(filelist.size());
                for(int j = 0; j<filelist.size();j++)
                {
                    outCloud[i].imgpt_for_img[j] = -1;
                }
                // assumes outCloud[i] corresponds to matches[i] -- i.e. the
                // triangulator preserves match order; TODO confirm.
                outCloud[i].imgpt_for_img[index_now] = matches[i].trainIdx;
                outCloud_tmp.push_back(outCloud[i]);
            }
        }
        outCloud.clear();
        outCloud= outCloud_tmp;
        cout << outCloud_tmp.size() << endl;

        // Accumulate the surviving points into the global cloud.
        for(unsigned int i=0;i<outCloud.size();i++)
        {
            outCloud_all.push_back(outCloud[i]);
        }
        outCloud_new = outCloud;
    }

    // NOTE(review): zeroes the y-component of every camera translation before
    // display -- looks like a visualization flattening hack; confirm this is
    // intentional.
    for( int i = 0; i<filelist.size(); i++)
        Pmats[i](1,3) =0;

    // Color the accumulated cloud and hand everything to the GL widget.
    GetRGBForPointCloud(outCloud_all,pointCloudRGB);
    ui->widget->update(getPointCloud(), getPointCloudRGB(), getCameras());
    cout << "finished" <<endl <<endl;
}