static int openVideo(cv::VideoCapture &capture, std::string filename) {
    std::cout << "opening video " << filename << std::endl;
    capture.open(filename);
    if (!capture.isOpened()) {
        // Fall back to interpreting the argument as a webcam index.
        std::cout << "opening camera " << filename << std::endl;
        int id_webcam = std::atoi(filename.c_str());
        capture.open(id_webcam);
    }
    if (!capture.isOpened()) {
        // std::cout << "cannot open " << filename << std::endl;
        return 0;
    }
    return 1;
}
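// A minimal sketch of the same fallback, assuming only OpenCV 3+: note that
// std::atoi returns 0 for non-numeric input, so a bad filename above silently
// falls back to webcam 0. Checking that the argument is all digits first
// avoids that pitfall. The helper name is hypothetical.
#include <algorithm>
#include <cctype>
#include <string>
#include <opencv2/videoio.hpp>

static bool openVideoOrCamera(cv::VideoCapture &capture, const std::string &arg) {
    bool isNumeric = !arg.empty() &&
        std::all_of(arg.begin(), arg.end(),
                    [](unsigned char c) { return std::isdigit(c) != 0; });
    if (isNumeric)
        capture.open(std::stoi(arg)); // treat "0", "1", ... as device indices
    else
        capture.open(arg);            // otherwise treat it as a file path or URL
    return capture.isOpened();
}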
bool readParameters(int argc, char** argv) {
    if (argc < 4) {
        usage();
        return false;
    }
    TheInputVideo = argv[1];
    // Read from camera or video file.
    if (TheInputVideo == "live")
        TheVideoCapturer.open(0);
    else
        TheVideoCapturer.open(TheInputVideo);
    if (!TheVideoCapturer.isOpened()) {
        cerr << "Could not open video" << endl;
        return false;
    }
    // Read the intrinsic (camera calibration) file.
    try {
        CameraParams.readFromXMLFile(argv[2]);
    } catch (std::exception &ex) {
        cout << ex.what() << endl;
        return false;
    }
    // Read the board configuration file.
    try {
        TheBoardConfig.readFromFile(argv[3]);
    } catch (std::exception &ex) {
        cout << ex.what() << endl;
        return false;
    }
    if (argc > 4)
        TheMarkerSize = atof(argv[4]);
    else
        TheMarkerSize = 1.;
    return true;
}
void mono_handler(const lcm_recv_buf_t *rbuf, const char* channel,
                  const lcmt_stereo *msg, void *user) {
    // Open the frame of the video specified by the message.
    // Check whether the currently open video is the correct file.
    if (current_video_number != msg->video_number) {
        video_capture.release();
        std::string newfile = GetMonoFilename(msg->timestamp, msg->video_number);
        if (newfile.empty()) {
            return;
        }
        std::cout << "Opening file: " << newfile << std::endl;
        if (!video_capture.open(newfile)) {
            std::cerr << "Failed to open file: " << newfile << std::endl;
            return;
        }
        current_video_number = msg->video_number;
    }
    video_capture.set(CV_CAP_PROP_POS_FRAMES, msg->frame_number + frame_offset);
    cv::Mat frame;
    video_capture >> frame;
    SendImageOverLcm(lcm_, image_channel, frame);
}
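// Frame-accurate seeking as above is codec-dependent: with many inter-frame
// codecs, CAP_PROP_POS_FRAMES lands on the nearest keyframe rather than the
// exact frame. A hedged sketch, assuming OpenCV 3+ names, that seeks and then
// verifies the position actually reached:
#include <opencv2/videoio.hpp>

static bool seekAndRead(cv::VideoCapture &cap, int frameNumber, cv::Mat &frame) {
    cap.set(cv::CAP_PROP_POS_FRAMES, frameNumber);
    if ((int)cap.get(cv::CAP_PROP_POS_FRAMES) != frameNumber) {
        // Seek was approximate; step forward by decoding until we arrive.
        while ((int)cap.get(cv::CAP_PROP_POS_FRAMES) < frameNumber) {
            if (!cap.grab()) return false;
        }
    }
    return cap.read(frame);
}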
void initVideoStream(cv::VideoCapture &cap) {
    if (cap.isOpened())
        cap.release();
    cap.open(0); // open the default camera
}
void InitOpenCVModules() // initialize the OpenCV data
{
    if (Webcam_mode) {
        camera.open(Webcam_number);
        if (!camera.isOpened()) // check that the video source opened correctly
        {
            // error in opening the video input
            cerr << "Unable to open Camera Stream" << endl;
            exit(EXIT_FAILURE);
        }
        camera >> Current_Frame; // read a frame from the camera source into Current_Frame
    } else {
void open(string name, int type) {
    capture_type = type;
    if (capture_type == 0) {
        videoCapture.open(name); // type 0: open as a video source
    } else {
        imageCapture.open(name); // otherwise: open as an image source
    }
}
bool SubsExtractor::open(string file) {
    videofile = file;
    bool o = cap->open(videofile);
    StartFrame = 0;
    EndFrame = cap->get(CV_CAP_PROP_FRAME_COUNT);
    return o;
}
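// CAP_PROP_FRAME_COUNT, used for EndFrame above, is only an estimate for some
// containers and can be 0 or negative for streams. A hedged fallback sketch,
// assuming OpenCV 3+ names, that counts frames by decoding when the container
// metadata is unusable:
#include <opencv2/videoio.hpp>

static int frameCount(cv::VideoCapture &cap) {
    int n = (int)cap.get(cv::CAP_PROP_FRAME_COUNT);
    if (n > 0) return n; // trust the container metadata when it looks sane
    n = 0;
    while (cap.grab()) ++n; // otherwise count by decoding (slow, consumes the stream)
    return n;
}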
bool init_opencv() {
    if (!capture.open(0)) {
        std::cerr << "error: capture.open() failed..." << std::endl;
        exit(-1);
    }
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    result_img.create(cv::Size(640, 480), CV_8UC3);
    return true;
}
void openCamera(cv::VideoCapture& cap, int argc, const char** argv) {
    cv::CommandLineParser parser(argc, argv, keys);
    int camNum = parser.get<int>("1");
    cap.open(camNum);
    if (!cap.isOpened()) {
        //help();
        std::cout << "***Could not initialize capturing...***\n";
        std::cout << "Current parameter's value: \n";
        exit(-1);
    }
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 160);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 120);
}
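// VideoCapture::set() can silently ignore unsupported modes, so a request
// like the 160x120 above is best verified with get(). A minimal sketch,
// assuming OpenCV 3+ property names; the helper name is illustrative:
#include <iostream>
#include <opencv2/videoio.hpp>

static void setAndCheckResolution(cv::VideoCapture &cap, int w, int h) {
    cap.set(cv::CAP_PROP_FRAME_WIDTH, w);
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, h);
    int gotW = (int)cap.get(cv::CAP_PROP_FRAME_WIDTH);
    int gotH = (int)cap.get(cv::CAP_PROP_FRAME_HEIGHT);
    if (gotW != w || gotH != h)
        std::cerr << "requested " << w << "x" << h
                  << " but device delivers " << gotW << "x" << gotH << std::endl;
}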
void initParams::readAndSet(cv::VideoCapture& cap, char* input) {
    cv::FileStorage fs(input, cv::FileStorage::READ);
    if (fs.isOpened()) {
        // The constructor already opened the file; just read the settings.
        fs["frameWidth"] >> frameWidth;
        fs["frameHeight"] >> frameHeight;
        fs["fps"] >> fps;
        fs["camId"] >> camId;
        fs["generateData"] >> genData;
        fs["visualizeData"] >> visualize;
        fs["writeVideo"] >> writeVideo;
        fs["generatePerfData"] >> genPerfData;
        fs["videoFile"] >> videoFile;
        fs.release();
        cap.open(videoFile);
        if (!cap.isOpened()) {
            std::cerr << "Could not access video file: " << videoFile << std::endl;
            exit(EXIT_FAILURE);
        }
    }
}
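// cv::FileStorage returns an empty node for a missing key, and streaming from
// an empty node leaves a numeric target at a default of 0, so explicit
// fallbacks are safer than the bare reads above. A minimal sketch of one
// guarded read; the key name and default are illustrative:
#include <opencv2/core.hpp>

static int readIntOr(const cv::FileStorage &fs, const char *key, int fallback) {
    cv::FileNode node = fs[key];
    return node.empty() ? fallback : (int)node;
}
// usage sketch: fps = readIntOr(fs, "fps", 30);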
void init() {
    // Open the camera.
    camera.open(0);
    if (!camera.isOpened()) {
        std::cout << "Can not find camera!" << std::endl;
        exit(-1);
    }
    camera.set(cv::CAP_PROP_FRAME_WIDTH, 160);
    camera.set(cv::CAP_PROP_FRAME_HEIGHT, 120);
    /*
    double width = camera.get(cv::CAP_PROP_FRAME_WIDTH);
    double height = camera.get(cv::CAP_PROP_FRAME_HEIGHT);
    std::cout << "width:" << width << "\t";
    std::cout << "height:" << height << "\t" << std::endl;
    */
    face_cascade_name = "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
    if (!face_cascade.load(face_cascade_name)) {
        std::cout << "can not find face_cascade_file!" << std::endl;
        exit(-1);
    }
    running = true;
    pthread_mutex_init(&mutexLock, NULL);
    pthread_create(&grabThread, NULL, grabFunc, NULL);
    // Set up the edi robot mraa control lib.
    spider_init();
    signal(SIGINT, clear);
    signal(SIGTERM, clear);
}
DWORD WINAPI video_test(void* data) {
    MyFrame* f = (MyFrame*)data;
    wxClientDC dc(f->left_bottom);
    cap.open(0);
    cv::Mat img;
    cap >> img;
    Sleep(2000);
    initFrame->Hide();
    // Start displaying: fade the window in.
    for (int i = 0; i < 50; i++) {
        f->SetTransparent(i + 206);
        Sleep(i / 10);
    }
    while (cap >> img, !img.empty()) {
        IplImage image = img.operator IplImage();
        wxImage wximg = wx_from_cv(&image);
        wximg.Rescale(320, 240);
        dc.DrawBitmap(wxBitmap(wximg), wxDefaultPosition);
    }
    return 0;
}
int main(int argc, char**argv) {
    capture.open(0);
    if (capture.isOpened() == false) {
        std::cerr << "no capture device found" << std::endl;
        return 1;
    }
    capture.set(CV_CAP_PROP_FRAME_WIDTH, vgaSize.width);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, vgaSize.height);
    if (capture.get(cv::CAP_PROP_FRAME_WIDTH) != (double)vgaSize.width ||
        capture.get(cv::CAP_PROP_FRAME_HEIGHT) != (double)vgaSize.height) {
        std::cerr << "current device doesn't support " << vgaSize.width << "x"
                  << vgaSize.height << " size" << std::endl;
        return 2;
    }
    cv::Mat image;
    capture >> image;
    cv::namedWindow(windowName);
    cv::imshow(windowName, image);
    initCuda();
    initArray(image);
    char key = -1;
    enum device statusDevice = useCpuSimd;
    enum precision statusPrecision = precisionFloat;
    int index = 1;
    cv::Mat stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
    cv::Mat gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
    double elapsedTime;
    while (isFinish(key) == false) {
        capture >> image;
        switch (key) {
        case 'h':
        case 'H': // switch to half precision
            statusPrecision = precisionHalf;
            std::cout << std::endl << header << "half " << std::endl;
            stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
            gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
            break;
        case 'f':
        case 'F': // switch to single precision
            statusPrecision = precisionFloat;
            std::cout << std::endl << header << "single" << std::endl;
            stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
            gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
            break;
        case 'b':
        case 'B': // switch to byte (gray) gain
            statusPrecision = precisionByte;
            std::cout << std::endl << header << "char" << std::endl;
            gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
            break;
        case '0':
        case '1':
            index = key - '0';
            switch (statusPrecision) {
            case precisionHalf: // precision half
                stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
                gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
                break;
            case precisionFloat: // precision single
                stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
                gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
                break;
            case precisionByte: // precision byte
                gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
                break;
            default:
                break;
            }
            break;
        case 'c':
        case 'C':
            std::cout << std::endl << "Using CPU SIMD " << std::endl;
            statusDevice = useCpuSimd;
            break;
        case 'g':
        case 'G':
            std::cout << std::endl << "Using GPU " << std::endl;
            statusDevice = useGpu;
            break;
        default:
            break;
        }
        if (statusDevice == useCpuSimd) {
            elapsedTime = multiplyImage(image, gain);
        } else {
#ifdef HAVE_CUDA // CUDA
            elapsedTime = multiplyImageCuda(image, gain);
#endif // HAVE_CUDA
        }
        computeStatistics(elapsedTime, key);
        if (key == 's' || key == 'S') {
            cv::imwrite(dumpFilename, image);
        }
        cv::imshow(windowName, image);
        key = cv::waitKey(1);
    }
    std::cout << std::endl;
    cv::destroyAllWindows();
    releaseArray();
    return 0;
}
void OpenCVTemplateApp::makeGUI() {
    interface->clear();
    interface->addButton("load image", [this] {
        auto path = ci::app::getOpenFilePath();
        image = cv::imread(path.string());
        std::cout << "cols " << image.cols << std::endl;
        std::cout << "rows " << image.rows << std::endl;
        std::cout << "channels " << image.channels() << std::endl;
        imageTexture = gl::Texture::create(fromOcv(image));
    });
    interface->addButton("load video", [this] {
        auto path = ci::app::getOpenFilePath();
        video.open(path.string());
        frameWidth = video.get(cv::CAP_PROP_FRAME_WIDTH);
        frameHeight = video.get(cv::CAP_PROP_FRAME_HEIGHT);
        totalFrames = video.get(cv::CAP_PROP_FRAME_COUNT);
        video.read(frame);
        if (isGrayScale) {
            cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
        }
        frameTexture = gl::Texture::create(fromOcv(frame));
        makeGUI();
    });
    interface->addSeparator();
    if (frameTexture) {
        interface->addParam("gray scale", &isGrayScale).updateFn([this] {
            video.retrieve(frame);
            if (isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
            makeGUI();
        });
        interface->addParam("nb of feature", &nbOfFeaturePoints).min(1).max(1000);
        if (isGrayScale) {
            interface->addButton("get feature points", [this] {
                cv::goodFeaturesToTrack(frame, featurePoints, nbOfFeaturePoints, 0.01, 10, cv::Mat(), 3, 0, 0.04);
            });
        }
        interface->addSeparator();
        interface->addParam("frame", &frameIndex).min(0).max(totalFrames - 1).step(1).updateFn([this] {
            video.set(cv::CAP_PROP_POS_FRAMES, frameIndex);
            video.read(frame);
            if (isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
        });
        interface->addSeparator();
        interface->addParam("speed", &frameSpeed).min(1).max(1000).step(1);
        interface->addButton("play", [this] {
            currentState = PLAY;
            makeGUI();
        });
        if (currentState == PLAY) {
            interface->addButton("pause", [this] {
                currentState = PAUSE;
                makeGUI();
            });
        }
    }
}
void *cameraThread(void *arg) {
    std::vector<cmdData> cmd;
    cv::Mat frame;
    cv::namedWindow("Color", CV_WINDOW_AUTOSIZE);       // create a window named "Color"
    cv::namedWindow("Thresholded", CV_WINDOW_AUTOSIZE); // create a window named "Thresholded"
    //cv::namedWindow("Contours", CV_WINDOW_AUTOSIZE);
    int initShutter = 242;
    //int initShutter = 0;
    int shutterVal = initShutter;
    int cannyMin = 30;
    int blockSize = 89;
    int fps = 60;
    // Shutter slider
    cv::createTrackbar("Shutter", "Color", &shutterVal, 4095, shutterCB, NULL);
    // Canny threshold
    cv::createTrackbar("Threshold", "Color", &cannyMin, 255, NULL, NULL);
    cv::createTrackbar("BlockSize", "Color", &blockSize, 255, NULL, NULL);
    cv::Mat colorFrame;
    cv::Mat tresholdedFrame;
    cv::Mat hsvFrame;
    cv::Mat grey, tmp;
    cv::Mat contourOutput;
    cap.open(CV_CAP_DC1394); // Open the first FireWire camera. In 2.3 use CV_CAP, in 2.5 use CV::CAP
    cap.set(CV_CAP_PROP_WHITE_BALANCE_BLUE_U, 794); // 736
    cap.set(CV_CAP_PROP_WHITE_BALANCE_RED_V, 437);
    cap.set(CV_CAP_PROP_EXPOSURE, initShutter); // "Shutter" in coriander
    cap.set(CV_CAP_PROP_FPS, fps);
    cap.set(CV_CAP_PROP_GAMMA, 0);
    cap.set(CV_CAP_PROP_GAIN, 30);
    while (runState) {
        cap >> frame;
        std::vector<std::vector<cv::Point> > contours;
        std::vector<cv::Vec4i> hierarchy;
        // Get the color image by decoding the Bayer BGGR pattern.
        cv::cvtColor(frame, colorFrame, CV_BayerBG2RGB, 0);
        cv::cvtColor(colorFrame, grey, CV_RGB2GRAY);
        // Remove the gripper from the image.
        cv::Rect roi = cv::Rect(0, 0, 640, 360);
        cv::Mat submatrix = cv::Mat(grey, roi);
        //submatrix.setTo(cv::Scalar(255));
        cv::threshold(submatrix, tresholdedFrame, cannyMin, 255, cv::THRESH_BINARY_INV);
        if (blockSize % 2 == 0) { // the adaptive threshold block size MUST be odd
            blockSize = blockSize - 1;
        }
        // cv::adaptiveThreshold(submatrix, tresholdedFrame, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY_INV, blockSize, 0);
        cv::Moments mu;
        mu = cv::moments(tresholdedFrame, true);
        // Find the center of mass.
        cv::Point2f mc = cv::Point2f(mu.m10 / mu.m00, mu.m01 / mu.m00);
        // Count non-zero pixels. Used to determine whether we are in trouble (getting large "white" areas).
        cameraError.areaOfObject = cv::countNonZero(tresholdedFrame);
        // Draw it - convert to RGB so we can draw on it with colors.
        cv::cvtColor(tresholdedFrame, tresholdedFrame, CV_GRAY2RGB);
        //cv::Mat drawing = cv::Mat::zeros(tresholdedFrame.size(), CV_8UC3);
        cv::Scalar color = cv::Scalar(0, 255, 0);
        cv::circle(tresholdedFrame, mc, 5, color, -1, 8, 0);
        // Calculate the distance from the center of the image.
        cv::Size s = tresholdedFrame.size();
        cv::Point centerOfFrame = cv::Point(s.width / 2, s.height / 2);
        float distX = centerOfFrame.x - mc.x;
        float distY = centerOfFrame.y - mc.y;
        cameraError.x = distX;
        cameraError.y = distY;
        /* OLD STUFF BELOW */
        // cv::Canny(tresholdedFrame, tmp, cannyMin, cannyMin*2, 3);
        // cv::findContours(tmp, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
        // int blablalba = 0;
        // /// Get the moments
        // std::vector<cv::Moments> mu(contours.size());
        // for (int i = 0; i < contours.size(); i++)
        //     { mu[i] = moments(contours[i], false); }
        // /// Get the mass centers:
        // std::vector<cv::Point2f> mc(contours.size());
        // for (int i = 0; i < contours.size(); i++)
        //     { mc[i] = cv::Point2f(mu[i].m10/mu[i].m00, mu[i].m01/mu[i].m00); }
        // std::vector<cv::Rect> boundRect(contours.size());
        // std::vector<std::vector<cv::Point> > contours_poly(contours.size());
        // // Draw some contours, and maybe some bounding box
        // cv::Mat drawing = cv::Mat::zeros(tresholdedFrame.size(), CV_8UC3);
        // cv::Point centerOfBlock;
        // for (int i = 0; i < contours.size(); i++)
        // {
        //     // Filter out small areas, and filter out contours that are holes
        //     // See http://stackoverflow.com/questions/8461612/using-hierarchy-in-findcontours-in-opencv
        //     if (cv::contourArea(contours[i]) < 200) {
        //     } else {
        //         cv::Scalar color = cv::Scalar(0, 255, 0);
        //         // draw center of mass
        //         cv::circle(drawing, mc[i], 5, color, -1, 8, 0);
        //         // cv::Point xx = cv::Point(boundRect[i].tl().x+(boundRect[i].width/2), boundRect[i].tl().y+(boundRect[i].height/2));
        //         // cv::circle(drawing, xx, 2, color, -1, 8, 0);
        //         centerOfBlock = mc[i];
        //     }
        // }
        // /* Calculate distance. */
        // cv::Size s = tresholdedFrame.size();
        // cv::Point centerOfFrame = cv::Point(s.width/2, s.height/2);
        // float distX = centerOfFrame.x - centerOfBlock.x;
        // float distY = centerOfFrame.y - centerOfBlock.y;
        // if (centerOfBlock.x == 0 || centerOfBlock.y == 0 || (fabs(distX) < 3 && fabs(distY) < 3)) {
        // } else {
        //     cout << "(Dist X, Dist Y) " << distX << ", " << distY << endl;
        //     cameraError.x = distX;
        //     cameraError.y = distY;
        //     // std::cout << outss << std::endl;
        //     /* cmdData tmp;
        //     tmp.x = x_out; tmp.y = y_out; tmp.z = 0; tmp.r = 0; tmp.p = 0; tmp.ya = 0;
        //     tmp.a = a_out; tmp.t_min = t_min_out;
        //     tmp.distX = distX; tmp.distY = distY;
        //     cmd.push_back(tmp); */
        // }
        /* END OLD STUFF */
        if (!frame.data)
            break;
        if (debugMonitor) {
            cv::imshow("Thresholded", tresholdedFrame); // Uncomment to see the actual picture. It will give an unsteady FPS
            cv::imshow("Color", grey);                  // Uncomment to see the actual picture. It will give an unsteady FPS
            //cv::imshow("Center", drawing);
        }
        if (cv::waitKey(1) == 27) { // wait 1 ms so the frame can be drawn; break on ESC only
            break;
        }
    }
    cv::destroyWindow("Color");       // destroy the window named "Color"
    cv::destroyWindow("Thresholded"); // destroy the window named "Thresholded"
    return NULL; // a void* thread function must return a value
}
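// One caveat in the centroid computation above: cv::moments() yields
// m00 == 0 for an all-black mask, which makes mc a NaN point. A minimal
// guarded sketch, assuming a binary single-channel mask; the helper name
// is illustrative:
#include <opencv2/imgproc.hpp>

static bool maskCentroid(const cv::Mat &mask, cv::Point2f &center) {
    cv::Moments mu = cv::moments(mask, true); // binaryImage = true
    if (mu.m00 <= 0.0) return false;          // nothing to locate
    center = cv::Point2f((float)(mu.m10 / mu.m00), (float)(mu.m01 / mu.m00));
    return true;
}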
int main() {
    // Open the camera.
    video_capture.open(0);
    if (!video_capture.isOpened()) {
        std::cerr << "Could not open video" << endl;
        return -1;
    }
    // Grab the first image, used to set the parameters below.
    video_capture >> input_image;
    // Read the camera parameters.
    camera_params.readFromXMLFile("camera.yml");
    camera_params.resize(input_image.size());
    // Create the windows.
    cv::namedWindow("thres", 1);
    cv::namedWindow("in", 1);
    marker_dector.getThresholdParams(threshold_param1, threshold_param2);
    i_threshold_param1 = threshold_param1;
    i_threshold_param2 = threshold_param2;
    cv::createTrackbar("ThresParam1", "in", &i_threshold_param1, 13, cvTackBarEvents);
    cv::createTrackbar("ThresParam2", "in", &i_threshold_param2, 13, cvTackBarEvents);
    char key = 0;
    int index = 0;
    // Capture until ESC is pressed or until the end of the video.
    while (key != 27 && video_capture.grab()) {
        // Copy the image.
        video_capture.retrieve(input_image);
        // Number of images captured.
        index++;
        // For checking the speed.
        double tick = (double)cv::getTickCount();
        // Detect the markers in the image.
        marker_dector.detect(input_image, markers, camera_params, marker_size);
        // Check the speed by calculating the mean speed of all iterations.
        average_time.first += ((double)cv::getTickCount() - tick) / cv::getTickFrequency();
        average_time.second++;
        std::cout << "Time detection=" << 1000 * average_time.first / average_time.second << " milliseconds" << endl;
        // Print the marker info and draw the markers in the image.
        input_image.copyTo(input_image_copy);
        for (unsigned int i = 0; i < markers.size(); i++) {
            std::cout << markers[i] << std::endl;
            markers[i].draw(input_image, Scalar(0, 0, 255), 2);
        }
        // Draw a 3D cube on each marker if there is 3D info.
        if (camera_params.isValid())
            for (unsigned int i = 0; i < markers.size(); i++) {
                aruco::CvDrawingUtils::draw3dCube(input_image, markers[i], camera_params);
                aruco::CvDrawingUtils::draw3dAxis(input_image, markers[i], camera_params);
            }
        //DONE! Easy, right?
        cout << endl << endl << endl;
        // Show the input with augmented information and the thresholded image.
        cv::imshow("in", input_image);
        cv::imshow("thres", marker_dector.getThresholdedImage());
        key = cv::waitKey(0); // wait for a key to be pressed
    }
    return 0;
}
int main(int argc, char** argv) {
    // ROS setup
    ROS_INFO("sks_vision without ui");
    ros::init(argc, argv, "sks_vision_nui");
    ros::NodeHandle nh;
    ros::Rate loop_rate(1000);
    // image_transport::ImageTransport it(nh);
    ros::Publisher pub_m = nh.advertise<geometry_msgs::Vector3>("/sks_vision", 1);
    ros::Subscriber position_sub = nh.subscribe("/slam_out_pose", 10, current_pos);
    nh.getParam("/SKS/vision/auto_gray", para_auto_gray);
    nh.getParam("/SKS/vision/gray_sum", para_threshold_num);
    while (1) {
        cap.open(200);
        if (cap.isOpened())
            break;
        else
            std::cout << "camera isn't opened" << std::endl;
    }
    while (nh.ok()) {
        cap >> cvframe;
        if (!cvframe.empty()) {
            frame = MatToQImage(cvframe);
            // frame = func_Gray(frame);
            frame = func_DeepGray(frame);
            //==============================================================
            int blacknum = 0;
            // for (int h = 0; h < frame.height(); h += 2) {
            //     for (int w = 0; w < frame.width(); w += 2) {
            //         if (qRed(frame.pixel(w, h)) <= 64 && qRed(frame.pixel(w, h)) >= 27) blacknum++; // 60,20
            //     }
            // }
            // std::cout << blacknum << std::endl;
            //==============================================================
            // Threshold_bar --> Threshold
            int gray;
            if (para_auto_gray) {
                gray = para_threshold_num;
            } else {
                // if (blacknum >= BLACK_BLOCK) {
                //     gray = 70;
                // } else {
                gray = Average_Threshold(frame);
                // gray = Qtsu(frame);
                if (cu_pos.pose.position.x < -1.07 + 0.2 && cu_pos.pose.position.x > -3.14 - 0.2 &&
                    cu_pos.pose.position.y < 3.87 + 0.2 && cu_pos.pose.position.y > 1.13 - 0.2 /*&& !goal_one_send*/) {
                    gray = 24;
                } else {
                    gray = 58;
                }
                // if (gray > THRESHOLD_MAX) gray = THRESHOLD_MAX;
                // else if (gray < THRESHOLD_MIN) gray = THRESHOLD_MIN;
            }
            frame = func_Threshold(frame, gray);
            org_frame = frame;
            for (int dot = 2; dot < frame.height() - 2; dot += (frame.height() / 16)) {
                for (int i = 0; i < frame.width(); i++) {
                    frame.setPixel(i, dot, QColor(0, 255, 0).rgb());
                }
            }
            // Reset: zero the whole array (memset takes a byte count).
            memset(Position, 0, sizeof(Position));
            // Segmentation
            for (int h = 2; h < frame.height() - 2; h += (frame.height() / 16)) {
                bw_sw = compareBlackWhite(org_frame, h);
                if (bw_sw == 0) {
                    Segmentation1(org_frame, &frame, h, 255);
                } else if (bw_sw == 1) {
                    Segmentation1(org_frame, &frame, h, 0);
                }
            }
            if (push_p > 2) {
                for (int i = 0; i < push_p; i++) {
                    total_x += Position[0][i];
                    total_y += Position[1][i];
                }
                double vector_x = total_x / push_p;
                double vector_y = total_y / push_p;
                double vector_h = frame.height() - 1 - vector_y;
                double vector_w = (ROBOT_CENTER - 1 - vector_x) / vector_h;
                for (int i = 0; i < vector_h; i++) {
                    frame.setPixel(ROBOT_CENTER - 1 - vector_w * i, frame.height() - 1 - i, QColor(0, 0, 255).rgb());
                }
                final_w = vector_x - (ROBOT_CENTER);
                final_h = frame.height() - 1 - vector_y;
                int sensor_x, sensor_y = 100;
                sensor_x = sensorPoint(org_frame, &frame, sensor_y);
                final_w += sensor_x;
                sensor_y = 379;
                final_h += sensor_y;
                // if (sensor_x != 0) final_h += sensor_y;
                sensor_y = 380;
                sensor_x = sensorPoint(org_frame, &frame, sensor_y);
                final_w += sensor_x;
                sensor_y = 99;
                final_h += sensor_y;
                // if (sensor_x != 0) final_h += sensor_y;
                ang = atan(final_w / final_h);
                // Mark the target point with a 3x3 blue square.
                for (int dx = -1; dx <= 1; dx++)
                    for (int dy = -1; dy <= 1; dy++)
                        frame.setPixel(vector_x + dx, vector_y + dy, QColor(0, 0, 255).rgb());
                vec3.z = ang;
            } else {
                vec3.z = 999;
            }
            // Reset the accumulators.
            total_x = 0, total_y = 0, push_p = 0; //,h_max=0;
            final_w = 0, final_h = 0;
            total_black = 0; total_white = 0;
            cvGray = QImageToCvMat(frame);
            // cv::imshow(WINDOW, cvGray);
            // cv::waitKey(1);
            // ROS: publish the result.
            pub_m.publish(vec3);
        }
        loop_rate.sleep();
        ros::spinOnce();
    }
    return 0;
}
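// The hand-rolled Average_Threshold / commented-out Qtsu above can usually be
// replaced by OpenCV's built-in Otsu thresholding, which selects the global
// threshold automatically. A hedged sketch on an 8-bit grayscale cv::Mat (the
// QImage-based pipeline above would need a conversion first); the helper name
// is illustrative:
#include <opencv2/imgproc.hpp>

static cv::Mat otsuBinarize(const cv::Mat &gray) {
    cv::Mat binary;
    double t = cv::threshold(gray, binary, 0, 255,
                             cv::THRESH_BINARY | cv::THRESH_OTSU);
    (void)t; // t is the threshold Otsu selected, if the caller wants to log it
    return binary;
}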
int main(int argc, char* argv[]) {
    signal(SIGINT, quitFunction);
    // Simple parsing of the parameters related to the image acquisition
    int xRes = 640;
    int yRes = 480;
    int cameraIndex = 0;
    if (argc > 2) {
        xRes = std::atoi(argv[1]);
        yRes = std::atoi(argv[2]);
    }
    if (argc > 3) {
        cameraIndex = std::atoi(argv[3]);
    }
    // The source of input images
    capture.open(cameraIndex);
    if (!capture.isOpened()) {
        std::cerr << "Unable to initialise video capture." << std::endl;
        return 1;
    }
#ifdef OPENCV3
    capture.set(cv::CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(cv::CAP_PROP_FRAME_HEIGHT, yRes);
#else
    capture.set(CV_CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, yRes);
#endif
    cv::Mat inputImage;
    // The tag detection happens in the Chilitags class.
    chilitags::Chilitags chilitags;
    // The detection is not perfect, so if a tag is not detected during one frame,
    // the tag will briefly disappear, which results in flickering.
    // To address this, Chilitags "cheats" by keeping tags at the same position
    // for n frames; when a tag disappears for more than 5 frames,
    // Chilitags actually removes it.
    // Here, we cancel this to show the raw detection results.
    chilitags.setFilter(0, 0.0f);
    cv::namedWindow("DisplayChilitags");
    // Main loop, exiting when 'q' is pressed
    for (; 'q' != (char)cv::waitKey(1) && sRunning; ) {
        // Capture a new image.
        capture.read(inputImage);
        // Start measuring the time needed for the detection
        int64 startTime = cv::getTickCount();
        // Detect tags in the current image (and time the detection).
        // The resulting map associates tag ids (between 0 and 1023)
        // to the four 2D points corresponding to the corner positions
        // in the picture.
        chilitags::TagCornerMap tags = chilitags.find(inputImage);
        // Measure the processing time needed for the detection
        int64 endTime = cv::getTickCount();
        float processingTime = 1000.0f * ((float)(endTime - startTime)) / cv::getTickFrequency();
        // Now we start using the result of the detection.
        // First, we set up some constants related to the information overlaid
        // on the captured image
        const static cv::Scalar COLOR(255, 0, 255);
        // OpenCV can draw with sub-pixel precision using fixed-point coordinates
        static const int SHIFT = 16;
        static const float PRECISION = 1 << SHIFT;
        // We don't want to draw directly on the input image, so we clone it
        cv::Mat outputImage = inputImage.clone();
        for (const std::pair<int, chilitags::Quad> &tag : tags) {
            int id = tag.first;
            // We wrap the corner matrix into a data structure that allows
            // easy access to the coordinates
            const cv::Mat_<cv::Point2f> corners(tag.second);
            // We start by drawing the borders of the tag
            for (size_t i = 0; i < 4; ++i) {
                cv::line(
                    outputImage,
                    PRECISION * corners(i),
                    PRECISION * corners((i + 1) % 4),
#ifdef OPENCV3
                    COLOR, 1, cv::LINE_AA, SHIFT);
#else
                    COLOR, 1, CV_AA, SHIFT);
#endif
            }
            // Other points can be computed from the four corners of the Quad.
            // Chilitags are oriented: points 0,1,2,3 of the Quad coordinates
            // are consistently the top-left, top-right, bottom-right and
            // bottom-left corners (i.e. clockwise, starting from top-left).
            // Using this, we can compute (an approximation of) the center of
            // the tag.
            cv::Point2f center = 0.5f * (corners(0) + corners(2));
            cv::putText(outputImage, cv::format("%d", id), center,
                        cv::FONT_HERSHEY_SIMPLEX, 0.5f, COLOR);
        }
        // Some stats on the current frame (resolution and processing time)
        cv::putText(outputImage,
                    cv::format("%dx%d %4.0f ms (press q to quit)",
                               outputImage.cols, outputImage.rows, processingTime),
                    cv::Point(32, 32),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5f, COLOR);
        // Finally...
        cv::imshow("DisplayChilitags", outputImage);
    }
    cv::destroyWindow("DisplayChilitags");
    capture.release();
    return 0;
}
/**
 * @function main
 */
int main(int argc, char* argv[]) {
    // Filter
    gFilterLimits.resize(6);
    //gFilterLimits << -0.35, 0.35, -0.70, 0.70, 1.5, 2.4; // Kinect
    gFilterLimits << -1.0, 1.0, -1.5, 1.5, 0.35, 2.0; // Asus on top of Crichton
    ObjectsDatabase mOd;
    mOd.init_classifier();
    mOd.load_dataset();
    gCapture.open(cv::CAP_OPENNI2);
    if (!gCapture.isOpened()) {
        printf("\t [ERROR] Could not open the capture object \n");
        return -1;
    }
    gCapture.set(cv::CAP_PROP_OPENNI2_MIRROR, 0.0);
    gCapture.set(cv::CAP_PROP_OPENNI_REGISTRATION, -1.0);
    gF = (float)gCapture.get(cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
    cv::namedWindow(gWindowName, cv::WINDOW_AUTOSIZE);
    ::google::InitGoogleLogging(argv[0]);
    for (;;) {
        if (!gCapture.grab()) {
            printf("\t * ERROR Could not grab a frame \n");
            return -1;
        }
        gCapture.retrieve(gRgbImg, cv::CAP_OPENNI_BGR_IMAGE);
        if (gIsSegmentedFlag) {
            drawSegmented();
        }
        gCapture.retrieve(gPclMap, cv::CAP_OPENNI_POINT_CLOUD_MAP);
        cv::imshow(gWindowName, gRgbImg);
        char k = cv::waitKey(30);
        if (k == 'q') {
            printf("\t Finishing program \n");
            break;
        }
        /** Recognize */
        else if (k == 'i') {
            // Process image
            process();
            gLabels.resize(gClusters.size());
            gIndex.resize(gClusters.size());
            // Store images
            for (int i = 0; i < gClusters.size(); ++i) {
                int xl = gBoundingBoxes[i](0);
                int yl = gBoundingBoxes[i](1);
                int xw = gBoundingBoxes[i](2) - gBoundingBoxes[i](0);
                int yw = gBoundingBoxes[i](3) - gBoundingBoxes[i](1);
                cv::Mat img(gRgbImg, cv::Rect(xl, yl, xw, yw));
                // Predict
                mOd.classify(img, gIndex[i], gLabels[i]);
                cv::putText(gRgbImg, gLabels[i],
                            cv::Point(gBoundingBoxes[i](0), gBoundingBoxes[i](1)),
                            cv::FONT_HERSHEY_SIMPLEX, 1, gColors[i], 2);
                mOd.sayIt(gIndex[i]);
            }
        } // else
    } // for
    return 0;
} // main
void Preprocessing_Wrap::VideoCaptureOpen(void) {
    cap.open(0);
}
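// For contrast with the unchecked open(0) above, a minimal self-contained
// capture loop with the error handling the snippets in this collection
// usually add; a sketch assuming OpenCV 3+ and a default camera at index 0:
#include <iostream>
#include <opencv2/highgui.hpp>
#include <opencv2/videoio.hpp>

int main() {
    cv::VideoCapture cap(0);
    if (!cap.isOpened()) {
        std::cerr << "could not open camera 0" << std::endl;
        return 1;
    }
    cv::Mat frame;
    while (cap.read(frame) && !frame.empty()) {
        cv::imshow("preview", frame);
        if (cv::waitKey(1) == 27) break; // ESC quits
    }
    return 0;
}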