// Run one detection step on `origin`: record frame history, extract motion
// contours from the newest frame diff, merge them into motion regions, track
// those regions across frames, and accumulate dense optical flow over them.
// NOTE(review): `targets` and `flipped_index` are output parameters that are
// not written in the visible body — presumably filled by the tracking
// helpers; confirm against their implementations.
void DetectWithOF5::detect(cv::Mat &origin, std::vector<cv::Rect> &targets, int &flipped_index) {
    origin_ = origin;
    curr_ = now();
    ++frame_idx_;

    // Save frame history.
    save_hist(origin);

#if 0
    // FIXME: from the most recent diff history, find likely target positions
    // by searching the integral image ...
    cv::Mat bin_diff = hist_.rget_diff(0);
    std::vector<cv::Rect> ts = find_diff_clusters(bin_diff);
    for (int i = 0; i < ts.size(); i++) {
        cv::rectangle(origin_, ts[i], cv::Scalar(255, 255, 255), 2);
    }
#endif

    // From the newest diff, find contours and merge them.
    CONTOURS regions;
    find_contours(regions);
    cv::drawContours(origin, regions, -1, cv::Scalar(0, 255, 255), 1);

    // Merge contours into "active regions" (motions).
    merge_motions(regions);
    draw_motions();

    // Track the motion regions over time ...
    tracking_motions();
    draw_tracking();

    // Dense optical flow.
    sum_motions_dense_of();
    draw_motions_dense_of();

    // Return.
}
/* ** ================ BEGIN MAIN SECTION ======================= */ void target_tracking( int argc, char** argv ) { double t1, t2, fps; int targ_selected = 0; int camera_img_height, camera_img_width, camera_img_fps; int waitkey_delay = 2; CvMat *image_gray = 0; CvMat *image_binary = 0; IplConvKernel *morph_kernel; CvSeq *contours; int i; pid = (int) getpid(); /* ** See if we can catch signals */ if ( signal(SIGTERM, sig_handler) == SIG_IGN) signal(SIGTERM, SIG_IGN); if ( signal(SIGHUP, sig_handler) == SIG_IGN) signal(SIGHUP, SIG_IGN); if ( signal(SIGINT, sig_handler) == SIG_IGN) signal(SIGINT, SIG_IGN); /* ** Set the number of tracked targets to zero ** tracked targets are persistent targets for a sequence of frames/images ** detected targets are the targets detected from a single frame or image */ num_tracked_targets = 0; /* ** Capture images from webcam /dev/video0 ** /dev/video0 = 0 ** /dev/video1 = 1 */ if ( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0]))) { printf(" Capturing image from camera\n"); camera=cvCaptureFromCAM( argc == 2 ? 
argv[1][0] - '0' : 0 ); } else { /* ** Capture image from file at command line ** useful for playback video and testing */ camera = cvCaptureFromFile( argv[1] ); } /* ** Check and see if camera/file capture is valid */ if (!camera) { printf("camera or image is null\n"); return; } /* ** Get camera properties */ camera_img_width = cvGetCaptureProperty(camera, CV_CAP_PROP_FRAME_WIDTH); camera_img_height = cvGetCaptureProperty(camera, CV_CAP_PROP_FRAME_HEIGHT); camera_img_fps = cvGetCaptureProperty(camera, CV_CAP_PROP_FPS); cvSetCaptureProperty( camera, CV_CAP_PROP_EXPOSURE, 500.0); cvSetCaptureProperty( camera, CV_CAP_PROP_EXPOSURE, 500.0); cvSetCaptureProperty( camera, CV_CAP_PROP_AUTO_EXPOSURE, 0.0); /* ** Parse the config file */ // T456_parse_vision( "../config/t456-vision.ini" ); T456_parse_vision( "/usr/local/config/t456-vision.ini" ); T456_print_camera_and_tracking_settings(); /* ** Start server listening on port 8080 */ T456_start_server(); /* ** Setup graphic display windows using OpenCV */ #ifdef GRAPHICS cvNamedWindow("original", CV_WINDOW_AUTOSIZE); cvNamedWindow("binary image", CV_WINDOW_AUTOSIZE); cvNamedWindow("contours", CV_WINDOW_AUTOSIZE); // cvNamedWindow("original", CV_WINDOW_KEEPRATIO); printf("camera_img_fps: %d\n", camera_img_fps); if (camera_img_fps < 0 ) { camera_img_fps = 30; printf("camera_img_fps: %d\n", camera_img_fps); } waitkey_delay = (int) ((1.0f / (float) camera_img_fps) * 1000.0f); printf("waitkey_delay: %d\n", waitkey_delay); if ( (waitkey_delay > 50) || (waitkey_delay == 0) ) waitkey_delay = 2; cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 1.0, 1.0, 0.0, 1, 8); #endif /* GRAPHICS */ /* ** Construct a morphological kernel */ morph_kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL); /* ** */ image_gray = cvCreateMat(camera_img_height, camera_img_width, CV_8UC1); image_binary = cvCreateMat(camera_img_height, camera_img_width, CV_8UC1); /* ** Time estimation variable */ t1 = (double)cvGetTickCount(); /* ** Process 
images until key press */ #ifdef GRAPHICS while (cvWaitKey(waitkey_delay) < 0) #else while (1) #endif { /* ** reset detected targets */ num_detect_targets = 0; /* ** Grab initial frame from image or movie file */ image = cvQueryFrame(camera); if ( !image ) { done(); return; } /* ** Convert RGB image to Hue and Value images (local function) ** ** Threshold out the lime green color from the hue image ** it is a bracket threshold, so two images are produced ** ** (HUE_MID_THRESH - 15) < target_color < (HUE_MID_THRESH + 15) ** ** Threshold value image. Because of illumination, the targets should ** be very bright in the image. ** ** If pixel value of the image is > VAL_THRESH ** then output 255 ** else output 0 ** ** Combine hue1, hue2, and value images with an AND operation ** output = hue1 AND hue1 AND value ** if a pixel in any of these images is pixel = 0 ** then the output is 0 */ T456_change_RGB_to_binary( image, image_binary ); /* ** Use the Morph function to join broken or speckled rectangles */ cvMorphologyEx( image_binary, image_gray, NULL, morph_kernel, CV_MOP_CLOSE, tracking.morph_closing_iterations); /* ** Process contours and detect targets of interest */ contours = find_contours( image_gray ); /* ** Process raw target detections and compare with current ** tracked targets */ track_targets_over_time( frame_cnt ); /* ** Rank and select highest scoring target */ if ( num_tracked_targets > 0 ) { T456_calculate_aimpoint( frame_cnt ); targ_selected = T456_select_main_target( frame_cnt ); #ifdef GRAPHICS draw_target_dot( tracked_targets[targ_selected], image, CV_RGB(0,255,0) ); #endif } /* ** Print out tracked targets */ printf("%d ", frame_cnt); for ( i = 0; i < num_tracked_targets; i++ ) { printf("%d ", tracked_targets[i].type); printf("%.2f %.2f ", tracked_targets[i].h_angle, tracked_targets[i].v_angle); printf("%.2f %.2f ", tracked_targets[i].xcenter, tracked_targets[i].ycenter); printf("(%.2f) ", tracked_targets[i].aspect_ratio); printf("(d: %.2f) ", 
tracked_targets[i].distance); printf("%d ", tracked_targets[i].time_tracked); #ifdef GRAPHICS draw_target_center( tracked_targets[i], image, CV_RGB(255,255,0) ); #endif } printf("\n"); /* ** pass selected target information into target message ** for the webservice */ if ( num_tracked_targets == 0 ) { target_message_length = snprintf(target_message, sizeof(target_message), "%06d,00,000000,000000,000000,0000", frame_cnt); } else { target_message_length = snprintf(target_message, sizeof(target_message), "%06d,%02d,%06.2f,%06.2f,%06.1f,1.0", frame_cnt, tracked_targets[targ_selected].type, tracked_targets[targ_selected].aim_h_angle, tracked_targets[targ_selected].aim_v_angle, tracked_targets[targ_selected].distance); } #ifdef GRAPHICS /* ** Display images */ cvShowImage("original",image); cvShowImage("binary image",image_binary); #endif /* GRAPHICS */ /* ** keep time of processing */ t2 = (double)cvGetTickCount(); fps = 1000.0 / ((t2-t1)/(cvGetTickFrequency()*1000.)); fps_sum = fps_sum + fps; frame_cnt++; #ifdef DIAG printf("time: %gms fps: %.2g\n",(t2-t1)/(cvGetTickFrequency()*1000.),fps); #endif t1 = t2; /* ** If we catch a stop or kill signal */ if ( STOP ) { break; } /* ** Print out a diagnostic image every second */ #ifdef GRAPHICS if ( (frame_cnt % 15) == 0 ) { if ( tracking.diag == 1 ) { sprintf(filename,"/home/panda/Images/frame_%05d_%06d.jpg", pid,frame_cnt); cvSaveImage(filename, image, 0 ); } if ( tracking.diag == 2 ) { sprintf(filename,"/home/panda/Images/frame_%05d_%06d_gray.jpg", pid,frame_cnt); cvSaveImage(filename, image_gray, 0 ); } if ( tracking.diag == 3 ) { sprintf(filename,"/home/panda/Images/frame_%05d_%06d_bin.jpg", pid,frame_cnt); cvSaveImage(filename, image_binary, 0 ); } } #endif } cvSaveImage("framegrab.jpg", image, 0 ); cvSaveImage("framegrab_bin.jpg", image_binary, 0 ); cvSaveImage("framegrab_gray.jpg", image_gray, 0 ); /* ** Release camera resources */ cvReleaseCapture(&camera); /* ** Print out timing information */ done(); /* ** Stop 
server listening on port 8080 */ T456_stop_server(); }
int main() { cv::Mat src = cv::imread("/Users/Qt/program/blogsCodes/pic/perspective08.jpg"); if (src.empty()){ std::cerr<<"can't open image"<<std::endl; return - 1; } //step 1 : convert the image to binary by otsu threshold cv::Mat binary; cv::cvtColor(src, binary, CV_BGR2GRAY); cv::threshold(binary, binary, 0, 255, cv::THRESH_BINARY + cv::THRESH_OTSU); #ifdef DRAW_OUTPUT cv::imshow("otsu", binary); cv::imwrite("otsu.jpg", binary); #endif //step 2 : find the contours of binary image auto contours = find_contours(binary); #ifdef DRAW_OUTPUT cv::Mat contour_map = cv::Mat::zeros(binary.size(), CV_8U); cv::drawContours(contour_map, contours, -1, cv::Scalar(255)); cv::imshow("contour map", contour_map); cv::imwrite("contour_map.jpg", contour_map); #endif //step 3 : find the corners of the quadrilaterals auto const corners = get_corners(contours); #ifdef DRAW_OUTPUT cv::Mat src_corners = src.clone(); cv::Scalar color[] = { {255, 0, 0}, {0, 255, 0}, {0, 0, 255} }; for(size_t i = 0; i != corners.size(); ++i){ for(auto const &point : corners[i]){ cv::circle(src_corners, point, 10, color[i % 3]); } } cv::imshow("src corners", src_corners); cv::imwrite("corners.jpg", src_corners); #endif //step 4 : correct the perspective distortion of the quadrilateral by warpPerspective cv::Mat target(150, 150, src.type()); std::vector<cv::Point2f> target_points { {0, 0}, {target.cols - 1, 0}, {target.cols - 1, target.rows - 1}, {0, target.rows - 1} }; std::vector<cv::Point2f> points; for(size_t i = 0; i != corners.size(); ++i){ OCV::convert_points(corners[i], points); cv::Mat const trans_mat =cv::getPerspectiveTransform(points, target_points); cv::warpPerspective(src, target, trans_mat, target.size()); #ifdef DRAW_OUTPUT std::stringstream name; name << "target" << std::setw(2) << std::setfill('0') << i <<".jpg"; cv::imshow("target", target); cv::imwrite(name.str(), target); cv::waitKey(); #endif } #ifdef DRAW_OUTPUT cv::waitKey(); #endif return 0; }