void cvTackBarEvents(int pos, void*)
{
    // enforce valid threshold parameters (param1 must be odd and >= 3)
    if (i_threshold_param1 < 3)
        i_threshold_param1 = 3;
    if (i_threshold_param1 % 2 != 1)
        i_threshold_param1++;
    if (i_threshold_param2 < 1)
        i_threshold_param2 = 1;
    threshold_param1 = i_threshold_param1;
    threshold_param2 = i_threshold_param2;
    marker_dector.setThresholdParams(threshold_param1, threshold_param2);

    // recompute the detection with the new parameters
    marker_dector.detect(input_image, markers, camera_params);
    input_image.copyTo(input_image_copy);
    for (unsigned int i = 0; i < markers.size(); i++)
        markers[i].draw(input_image_copy, Scalar(0, 0, 255), 2);

    // draw a 3d cube on each marker if there is 3d info
    if (camera_params.isValid())
        for (unsigned int i = 0; i < markers.size(); i++)
            aruco::CvDrawingUtils::draw3dCube(input_image_copy, markers[i], camera_params);

    cv::imshow("in", input_image_copy);
    cv::imshow("thres", marker_dector.getThresholdedImage());
}
void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
  static tf::TransformBroadcaster br;
  if (cam_info_received_)
  {
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
      inImage_ = cv_ptr->image;

      // clear out previous detection results
      markers_.clear();
      marker_msg_->markers.clear();

      // Ok, let's detect
      mDetector_.detect(inImage_, markers_, camParam_, marker_size_, false);
      marker_msg_->markers.resize(markers_.size());
      ros::Time curr_stamp(ros::Time::now());
      marker_msg_->header.stamp = curr_stamp;
      marker_msg_->header.seq++;

      // get the current transform from the camera frame to the output reference frame
      tf::StampedTransform cameraToReference;
      cameraToReference.setIdentity();
      if (reference_frame_ != camera_frame_)
      {
        getTransform(reference_frame_, camera_frame_, cameraToReference);
      }

      // now find the transform for each detected marker
      for (size_t i = 0; i < markers_.size(); ++i)
      {
        tf::Transform transform = aruco_ros::arucoMarker2Tf(markers_[i]);
        transform = static_cast<tf::Transform>(cameraToReference) * transform;

        // Maybe change this to broadcast separate transforms for each marker?
        //tf::StampedTransform stampedTransform(transform, curr_stamp,
        //                                      reference_frame_, marker_frame_);
        //br.sendTransform(stampedTransform);

        aruco_msgs::Marker& marker_i = marker_msg_->markers.at(i);
        tf::poseTFToMsg(transform, marker_i.pose.pose);
        marker_i.header.frame_id = reference_frame_;
        marker_i.header.stamp = curr_stamp;
        marker_i.id = markers_.at(i).id;
        marker_i.confidence = 1.0;

        markers_[i].draw(inImage_, cv::Scalar(0, 0, 255), 2);
      }

      // publish the marker array
      if (marker_msg_->markers.size() > 0)
        marker_pub_.publish(marker_msg_);

      // draw a 3d axis on each marker if there is 3d info
      if (camParam_.isValid() && marker_size_ != -1)
      {
        for (size_t i = 0; i < markers_.size(); ++i)
          aruco::CvDrawingUtils::draw3dAxis(inImage_, markers_[i], camParam_);
      }

      if (image_pub_.getNumSubscribers() > 0)
      {
        // show the input image with the augmented information
        cv_bridge::CvImage out_msg;
        out_msg.header.stamp = curr_stamp;
        out_msg.encoding = sensor_msgs::image_encodings::RGB8;
        out_msg.image = inImage_;
        image_pub_.publish(out_msg.toImageMsg());
      }

      if (debug_pub_.getNumSubscribers() > 0)
      {
        // show also the internal image resulting from the threshold operation
        cv_bridge::CvImage debug_msg;
        debug_msg.header.stamp = curr_stamp;
        debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
        debug_msg.image = mDetector_.getThresholdedImage();
        debug_pub_.publish(debug_msg.toImageMsg());
      }
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
    }
  }
}
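The callback above uses a getTransform() helper that is not part of this listing. A minimal sketch of what such a helper could look like, assuming a tf::TransformListener member named _tfListener (that member name and the timeout values are assumptions, not taken from the package):

// Hypothetical sketch: look up the latest transform from childFrame to refFrame.
// _tfListener is an assumed tf::TransformListener member; timeouts are illustrative.
bool getTransform(const std::string& refFrame,
                  const std::string& childFrame,
                  tf::StampedTransform& transform)
{
  std::string errMsg;
  // wait briefly for the transform to become available
  if (!_tfListener.waitForTransform(refFrame, childFrame, ros::Time(0),
                                    ros::Duration(0.5), ros::Duration(0.01), &errMsg))
  {
    ROS_ERROR_STREAM("Unable to get pose from TF: " << errMsg);
    return false;
  }
  try
  {
    // fill in the most recent transform refFrame <- childFrame
    _tfListener.lookupTransform(refFrame, childFrame, ros::Time(0), transform);
  }
  catch (const tf::TransformException& e)
  {
    ROS_ERROR_STREAM("Error in lookupTransform of " << childFrame
                     << " in " << refFrame << ": " << e.what());
    return false;
  }
  return true;
}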
int main()
{
    // open the camera
    video_capture.open(0);
    if (!video_capture.isOpened()) {
        std::cerr << "Could not open video" << endl;
        return -1;
    }

    // grab the first image, used to set up the parameters
    video_capture >> input_image;

    // read the camera parameters
    camera_params.readFromXMLFile("camera.yml");
    camera_params.resize(input_image.size());

    // create the GUI windows and the threshold trackbars
    cv::namedWindow("thres", 1);
    cv::namedWindow("in", 1);
    marker_dector.getThresholdParams(threshold_param1, threshold_param2);
    i_threshold_param1 = threshold_param1;
    i_threshold_param2 = threshold_param2;
    cv::createTrackbar("ThresParam1", "in", &i_threshold_param1, 13, cvTackBarEvents);
    cv::createTrackbar("ThresParam2", "in", &i_threshold_param2, 13, cvTackBarEvents);

    char key = 0;
    int index = 0;
    // capture until ESC is pressed or until the end of the video
    while (key != 27 && video_capture.grab()) {
        // retrieve the grabbed image
        video_capture.retrieve(input_image);
        // number of images captured
        index++;
        // for checking the speed
        double tick = (double)cv::getTickCount();
        // detect the markers in the image passed
        marker_dector.detect(input_image, markers, camera_params, marker_size);
        // check the speed by computing the mean detection time over all iterations
        average_time.first += ((double)cv::getTickCount() - tick) / cv::getTickFrequency();
        average_time.second++;
        std::cout << "Time detection=" << 1000 * average_time.first / average_time.second
                  << " milliseconds" << endl;

        // print marker info and draw the markers in the image
        input_image.copyTo(input_image_copy);
        for (unsigned int i = 0; i < markers.size(); i++) {
            std::cout << markers[i] << std::endl;
            markers[i].draw(input_image_copy, Scalar(0, 0, 255), 2);
        }

        // draw a 3d cube and axis on each marker if there is 3d info
        if (camera_params.isValid())
            for (unsigned int i = 0; i < markers.size(); i++) {
                aruco::CvDrawingUtils::draw3dCube(input_image_copy, markers[i], camera_params);
                aruco::CvDrawingUtils::draw3dAxis(input_image_copy, markers[i], camera_params);
            }
        // DONE! Easy, right?
        cout << endl << endl << endl;

        // show the input with augmented information and the thresholded image
        cv::imshow("in", input_image_copy);
        cv::imshow("thres", marker_dector.getThresholdedImage());
        key = cv::waitKey(0); // wait for a key to be pressed
    }
    return 0;
}
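main() and cvTackBarEvents() share a set of file-scope variables that are not shown in the listings. A minimal sketch of the declarations they assume, with the names taken from the code; the types follow the aruco API, and the marker_size value is only illustrative:

#include <opencv2/opencv.hpp>
#include <aruco/aruco.h>
#include <aruco/cvdrawingutils.h>

using namespace cv;
using namespace std;

// shared state used by main() and the trackbar callback
cv::VideoCapture video_capture;           // camera or video source
cv::Mat input_image, input_image_copy;    // current frame and its annotated copy
aruco::MarkerDetector marker_dector;      // the detector (name kept as in the listings)
std::vector<aruco::Marker> markers;       // markers found in the current frame
aruco::CameraParameters camera_params;    // intrinsics loaded from camera.yml
float marker_size = 0.05f;                // marker side length in meters (illustrative)

// threshold parameters, mirrored as ints for the OpenCV trackbars
double threshold_param1, threshold_param2;
int i_threshold_param1, i_threshold_param2;

// accumulated detection time and number of processed frames
std::pair<double, int> average_time(0, 0);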