// private method which sends the created image to the projector
static void drawToProjector()
{
	// Display Image
	IplImage* img;
	cvNamedWindow( "Projection", CV_WINDOW_NORMAL );
	cvSetWindowProperty("Projection", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
	img = cvLoadImage( "arrow_test.jpg", CV_LOAD_IMAGE_UNCHANGED );
	cvShowImage("Projection", img);
	//cvWaitKey(150);
}
Esempio n. 2
0
// Timer Callback - uiImage_timer
//# Start uiImage_timer_operation Marker
// Periodic timer callback: requests a camera frame from the captureImage
// service, wraps the returned pixel buffer in an OpenCV image, and displays
// it fullscreen in the "UIP" window.  Key-driven modes 2-5 are currently
// commented out, so every key except left-arrow falls through to the raw
// feed (Mode_1).
void user_input_imager::uiImage_timer_operation(const NAMESPACE::TimerEvent& event)
{
#ifdef USE_ROSMOD
  comp_queue.ROSMOD_LOGGER->log("DEBUG", "Entering user_input_imager::uiImage_timer_operation");
#endif
  // Business Logic for uiImage_timer_operation
  agse_package::captureImage arg;
  Mat camera_feed;

  if (this->captureImage_client.call(arg)) {

    ROS_INFO("Capture Image Client Call Successful; Height: %d, Width: %d ", arg.response.height, arg.response.width);

    // Wrap the raw byte vector from the service response in a Mat header
    // (no pixel copy; the Mat borrows the vector's storage for this scope).
    camera_feed = Mat(arg.response.height, 
		      arg.response.width, 
		      CV_8UC3, 
		      arg.response.imgVector.data());

    // Mat to IplImage *
    IplImage ipltemp = camera_feed;
    cvCopy(&ipltemp, processed_image);
    cvResize(processed_image, Mode_1);

    // BUGFIX: create and configure the window *before* drawing into it;
    // the original called cvShowImage ahead of cvNamedWindow.
    cvNamedWindow( "UIP", 1 );
    cvSetWindowProperty("UIP", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
    cvShowImage( "UIP", Mode_1);
  }

  // Poll the GUI event queue once per tick; this also lets HighGUI repaint.
  key = 0;
  key = cvWaitKey(1);

  // RAW CAMERA FEED (65361 = X11 left-arrow keysym)
  if (key == 65361) {
    ROS_INFO("Mode 1 Activated");
    cvShowImage( "UIP", Mode_1);
  }
  /*
  // SAMPLE PROCESSED IMAGE
  else if (key == 65363) {
    ROS_INFO("Mode 2 Activated");

    // Mat to IplImage *
    processed_image = cvCreateImage(cvSize(sample_rawImage.cols, sample_rawImage.rows), 8, 3);
    IplImage ipltemp = sample_rawImage;
    cvCopy(&ipltemp, processed_image);

    cvResize(processed_image, Mode_2);
    cvShowImage( "UIP", Mode_2);
    cvNamedWindow( "UIP", 1 );
    cvSetWindowProperty("UIP", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
    key = 0;

  }

  else if (key == 65362) {

    ROS_INFO("Mode 3 Activated");

    // Mat to IplImage *
    processed_image = cvCreateImage(cvSize(pb_rawImage.cols, pb_rawImage.rows), 8, 3);
    IplImage ipltemp = pb_rawImage;
    cvCopy(&ipltemp, processed_image);

    cvResize(processed_image, Mode_3);
    cvShowImage( "UIP", Mode_3);
    cvNamedWindow( "UIP", 1 );
    cvSetWindowProperty("UIP", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
    key = 0;
  }

  else if (key == 65364) {

    ROS_INFO("Mode 4 Activated"); 

    // Mat to IplImage *
    processed_image = cvCreateImage(cvSize(pb_gsImage.cols, pb_gsImage.rows), 8, 3);
    IplImage ipltemp = pb_gsImage;
    cvCopy(&ipltemp, processed_image);

    cvResize(processed_image, Mode_4);
    cvShowImage( "UIP", Mode_4);
    cvNamedWindow( "UIP", 1 );
    cvSetWindowProperty("UIP", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
    key = 0;
  }

  else if (key == 13) {
      
    ROS_INFO("Mode 5 Activated");
    cvShowManyImages("UIP", 4, top_left, top_right, bottom_left, bottom_right);
    key = 0;
  }
  */
  else {
    ROS_INFO("No Mode Activated");
    cvShowImage( "UIP", Mode_1);
  }

#ifdef USE_ROSMOD
  comp_queue.ROSMOD_LOGGER->log("DEBUG", "Exiting user_input_imager::uiImage_timer_operation");
#endif
}
Esempio n. 3
0
/// @brief C++ convenience wrapper: forwards a window-property change to the
///        legacy C API (cvSetWindowProperty).
void cv::setWindowProperty(const std::string& winname, int prop_id, double prop_value)
{
    const char* window_name = winname.c_str();
    cvSetWindowProperty(window_name, prop_id, prop_value);
}
/*******************************************************************************************************************//**
 * @brief Program entry point
 *
 * Handles image processing and display of annotated results
 *
 * @param[in] argc command line argument count
 * @param[in] argv command line argument vector
 * @return return status
 * @author Christopher D. McMurrough
 **********************************************************************************************************************/
int main(int argc, char** argv)
{
    // validate and parse the command line arguments
    int cameraIndex = 0;
    bool displayMode = true;
    bool flipDisplay = false;
    if(argc != NUM_COMNMAND_LINE_ARGUMENTS + 1)
    {
        std::printf("USAGE: <camera_index> <display_mode>\n");
        std::printf("Running with default parameters... \n");
    }
    else
    {
        cameraIndex = atoi(argv[1]);
        displayMode = atoi(argv[2]) > 0;
        flipDisplay = atoi(argv[2]) == 2;
    }

    // initialize the eye camera video capture
//    cv::VideoCapture occulography(cameraIndex);
    cv::VideoCapture occulography("pupil_test.mp4");

    if(!occulography.isOpened())
    {
        std::printf("Unable to initialize camera %u! \n", cameraIndex);
        return 0;
    }
    // THESE ARE FOR GETTING THE PROPERTIES OF THE CAMERA IF APPLICABLE

    std::cout<<"width "<< occulography.get(CV_CAP_PROP_FRAME_WIDTH)  << std::endl;
    std::cout<<"height "<< occulography.get(CV_CAP_PROP_FRAME_WIDTH)  << std::endl;

    

    // std::cout<<"format "<< occulography.get(CV_CAP_PROP_FORMAT)  << std::endl;
    // std::cout<<"fps "<< occulography.get(CV_CAP_PROP_FPS)  << std::endl;
    // std::cout<<"brightness "<< occulography.get(CV_CAP_PROP_BRIGHTNESS)  << std::endl;
    // std::cout<<"contrast "<< occulography.get(CV_CAP_PROP_CONTRAST)  << std::endl;
    // std::cout<<"saturation "<< occulography.get(CV_CAP_PROP_SATURATION)  << std::endl;
    // std::cout<<"hue "<< occulography.get(CV_CAP_PROP_HUE)  << std::endl;
    // std::cout<<"gain "<< occulography.get(CV_CAP_PROP_GAIN)  << std::endl;
    // std::cout<<"exposure "<< occulography.get(CV_CAP_PROP_EXPOSURE)  << std::endl;
    // std::cout<<"convert rgb "<< occulography.get(CV_CAP_PROP_CONVERT_RGB)  << std::endl;

    // THESE ARE FOR SETTING THE PROPERTIES OF THE CAMERA IF APPLICABLE
    occulography.set(CV_CAP_PROP_FRAME_WIDTH, CAMERA_FRAME_WIDTH);
    occulography.set(CV_CAP_PROP_FRAME_HEIGHT, CAMERA_FRAME_HEIGHT);
    // occulography.set(CV_CAP_PROP_FORMAT, CAMERA_FORMAT);
    // occulography.set(CV_CAP_PROP_FPS, CAMERA_FPS);
    occulography.set(CV_CAP_PROP_BRIGHTNESS, CAMERA_BRIGHTNESS);
    occulography.set(CV_CAP_PROP_CONTRAST, CAMERA_CONTRAST);
    occulography.set(CV_CAP_PROP_SATURATION, CAMERA_SATURATION);
    // occulography.set(CV_CAP_PROP_HUE, CAMERA_HUE);
    // occulography.set(CV_CAP_PROP_GAIN, CAMERA_GAIN);
    // occulography.set(CV_CAP_PROP_EXPOSURE, CAMERA_EXPOSURE);
    // occulography.set(CV_CAP_PROP_CONVERT_RGB, CAMERA_CONVERT_RGB);

    // intialize the display window if necessary
    if(displayMode)
    {
        cvNamedWindow("eyeImage", CV_WINDOW_NORMAL);
        cvSetWindowProperty("eyeImage", CV_WND_PROP_FULLSCREEN, CV_WINDOW_NORMAL);
        cvSetWindowProperty("eyeImage", CV_WND_PROP_AUTOSIZE, CV_WINDOW_NORMAL);
        cvSetWindowProperty("eyeImage", CV_WND_PROP_ASPECTRATIO, CV_WINDOW_KEEPRATIO);
    }

    // create the pupil tracking object
    PupilTracker tracker;
    tracker.setDisplay(displayMode);

    // store the frame data
    cv::Mat eyeImage;
    //struct PupilData result;
    bool trackingSuccess = false;

    // store the time between frames
    int frameStartTicks, frameEndTicks, processStartTicks, processEndTicks;
    float processTime, totalTime;

    // process data until program termination
    bool isRunning = true;
    while(isRunning)
    {
        // start the timer
        frameStartTicks = clock();

        // attempt to acquire an image frame
        if(occulography.read(eyeImage))
        {
            // process the image frame
            processStartTicks = clock();
            trackingSuccess = tracker.findPupil(eyeImage);
            processEndTicks = clock();
            processTime = ((float)(processEndTicks - processStartTicks)) / CLOCKS_PER_SEC;

            // warn on tracking failure
            if(!trackingSuccess)
            {
                std::cout<< "Unable to locate pupil!" << std::endl;
            }

            // update the display
            if(displayMode)
            {
                cv::Mat displayImage(eyeImage);

                // annotate the image if tracking was successful
                if(trackingSuccess)
                {
                    // draw the pupil center and boundary
                    //cvx::cross(displayImage, result.pupil_center, 5, COLOR_RED);
                    cv::ellipse(displayImage, tracker.getEllipseRectangle(), COLOR_BLUE, 2);

                    // shade the pupil area
                    cv::Mat annotation(eyeImage.rows, eyeImage.cols, CV_8UC3, 0.0);
                    cv::RotatedRect my_rotated_rect_property;
                    my_rotated_rect_property = tracker.getEllipseRectangle();
                    cv::Point2f Center;
                    Center = my_rotated_rect_property.center;

                    double x1, y1, x2, y2;
                    
                    x1 = Center.x - 40; y1 = Center.y - 40; x2 = Center.x + 40; y2 = Center.y + 40;

                    // Draw a square/rectangle around the ellipse based off of the center 
                    // This is for the rectangle
                    cv::Point x1y1 = cv::Point(x1, y1);
                    cv::Point x2y2 = cv::Point(x2, y2);
                    // This is for the lines around the rectangle 
                    cv::Point x1y2 = cv::Point(x1, y2);
                    cv::Point x2y1 = cv::Point(x2, y1);

                    // here is the math to draw the lines 
                    // for the horizontal line
                    cv::Point horizontal_start = cv::Point(x1-20, (y1+y2)/2);
                    cv::Point horizontal_end = cv::Point(x1+100, (y1+y2)/2);
                    
                    // for the vertical line
                    cv::Point vertical_start = cv::Point((x1+x2)/2, y1-20);
                    cv::Point vertical_end = cv::Point((x1+x2)/2, y1+100);
                    
                    // done with the lines

                    cv::ellipse(annotation, tracker.getEllipseRectangle(), COLOR_PURPLE, -1);
                    const double alpha = 0.7;
                    cv::addWeighted(displayImage, alpha, annotation, 1.0 - alpha, 0.0, displayImage);
                    // The centre is actually a tiny circle (neat trick)
                    cv::circle(displayImage, Center, 2.5, COLOR_RED, 2, 4);
                    // The bounding rectangle based off of the centre
                    cv::rectangle(displayImage, x1y1, x2y2, COLOR_GREEN, 1);
                    // The lines across the rectangle
                    cv::line(displayImage, horizontal_start, horizontal_end, COLOR_GREEN, 1);
                    cv::line(displayImage, vertical_start, vertical_end, COLOR_GREEN, 1);
                    int fontFace = cv::FONT_HERSHEY_PLAIN;
                    double scale = 1.5;
                    int thickness = 1;

                    int baseline = 0;
                    char pupil_center[25];
                    char ellipse_size[25];
                    char pupil_angle[25];
                    sprintf(pupil_center, "Pupil Center: (%.2lf, %.2lf)",  Center.x, Center.y);
                    sprintf(ellipse_size, "Pupil Size: (%.2lf, %.2lf)",  my_rotated_rect_property.size.width,my_rotated_rect_property.size.height);
 					sprintf(pupil_angle, "Pupil Angle: (%.2lf)",  my_rotated_rect_property.angle);
                    cv::putText(displayImage, "OptiFind :: Real Time Eye Tracker", cv::Point(40, 40), fontFace, 1, COLOR_WHITE, thickness, 8);
                    cv::putText(displayImage, pupil_center, cv::Point(350, 315), fontFace, 1, COLOR_WHITE, thickness, 8);
                    cv::putText(displayImage, ellipse_size, cv::Point(350, 330), fontFace, 1, COLOR_WHITE, thickness, 8);
                    cv::putText(displayImage, pupil_angle, cv::Point(350, 345), fontFace, 1, COLOR_WHITE, thickness, 8);
                 }

                if(flipDisplay)
                {
                    // annotate the image
                    cv::Mat displayFlipped;
                    cv::flip(displayImage, displayFlipped, 1);
                    cv::imshow("eyeImage", displayFlipped);

                    // display the annotated image
                    isRunning = cv::waitKey(1) != 'q';
                    char key = cvWaitKey(10);
                    // If the user pressed 'ESC' Key then break the loop and exit the program
                    if (key == 27)
                        break;
                    displayFlipped.release();
                }
                else
                {
                    // display the image
                    cv::imshow("eyeImage", displayImage);
                    //char key1 = cvWaitKey(0);
                    isRunning = cv::waitKey(1) != 'q';
                    char key = cvWaitKey(10);
                    // If the user pressed 'ESC' Key then break the loop and exit the program
                    if (key == 27)
                        break;
                }
                // release display image
                displayImage.release();
            }
        }
        else
        {
            std::cout<<"WARNING: Unable to capture image from source!\n"<<std::endl;
            std::cout<< "Reading the video from frame 0 again!" << std::endl;
            occulography.set(CV_CAP_PROP_POS_FRAMES, 0);
            continue;
        }

        // stop the timer and print the elapsed time
        // frameEndTicks = clock();
        // totalTime = ((float)(frameEndTicks - frameStartTicks)) / CLOCKS_PER_SEC;
        // std::printf("Processing time (pupil, total) (result x,y): %.4f %.4f - %.2f %.2f\n", processTime, totalTime, tracker.getEllipseRectangle().center.x, tracker.getEllipseRectangle().center.y);
        //std::cout<< "Processing time (pupil)"<< processTime << "total time" <<  totalTime << "pupil center" << tracker.getEllipseRectangle().center << std::endl;
       
    }

    // release the video source before exiting
    occulography.release();
}
Esempio n. 5
0
// Live detection demo loop (darknet).
// Loads the network from cfgfile/weightfile, opens either the video file
// `filename` or camera `cam_index`, then runs frame fetching and detection
// on worker threads while the main loop either displays each annotated
// frame or (when `prefix` is set) saves numbered frames to disk.
// NOTE(review): demo_* variables, cap, net, buff, buff_letter, ipl, avg,
// last_avg, last_avg2, boxes, probs, fps, demo_time, buff_index and
// demo_done appear to be file-scope globals shared with fetch_in_thread /
// detect_in_thread — confirm against the rest of the file.
void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, int classes, int delay, char *prefix, int avg_frames, float hier, int w, int h, int frames, int fullscreen)
{
    demo_delay = delay;
    demo_frame = avg_frames;
    // rolling window of the last demo_frame prediction vectors
    predictions = calloc(demo_frame, sizeof(float*));
    image **alphabet = load_alphabet();
    demo_names = names;
    demo_alphabet = alphabet;
    demo_classes = classes;
    demo_thresh = thresh;
    demo_hier = hier;
    printf("Demo\n");
    net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    // batch size 1: one frame at a time through the network
    set_batch_network(&net, 1);
    pthread_t detect_thread;
    pthread_t fetch_thread;

    srand(2222222);

    if(filename){
        printf("video file: %s\n", filename);
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);

        // apply capture properties only when explicitly requested (non-zero)
        if(w){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
        }
        if(h){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
        }
        if(frames){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FPS, frames);
        }
    }

    if(!cap) error("Couldn't connect to webcam.\n");

    // output layer of the network determines detection-buffer sizes
    layer l = net.layers[net.n-1];
    demo_detections = l.n*l.w*l.h;
    int j;

    avg = (float *) calloc(l.outputs, sizeof(float));
    last_avg  = (float *) calloc(l.outputs, sizeof(float));
    last_avg2 = (float *) calloc(l.outputs, sizeof(float));
    for(j = 0; j < demo_frame; ++j) predictions[j] = (float *) calloc(l.outputs, sizeof(float));

    boxes = (box *)calloc(l.w*l.h*l.n, sizeof(box));
    probs = (float **)calloc(l.w*l.h*l.n, sizeof(float *));
    for(j = 0; j < l.w*l.h*l.n; ++j) probs[j] = (float *)calloc(l.classes+1, sizeof(float));

    // triple-buffered frames: one being fetched, one detected, one displayed
    buff[0] = get_image_from_stream(cap);
    buff[1] = copy_image(buff[0]);
    buff[2] = copy_image(buff[0]);
    buff_letter[0] = letterbox_image(buff[0], net.w, net.h);
    buff_letter[1] = letterbox_image(buff[0], net.w, net.h);
    buff_letter[2] = letterbox_image(buff[0], net.w, net.h);
    ipl = cvCreateImage(cvSize(buff[0].w,buff[0].h), IPL_DEPTH_8U, buff[0].c);

    int count = 0;
    if(!prefix){
        cvNamedWindow("Demo", CV_WINDOW_NORMAL); 
        if(fullscreen){
            cvSetWindowProperty("Demo", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
        } else {
            cvMoveWindow("Demo", 0, 0);
            cvResizeWindow("Demo", 1352, 1013);
        }
    }

    demo_time = get_wall_time();

    while(!demo_done){
        // rotate the triple buffer, then fetch/detect concurrently
        buff_index = (buff_index + 1) %3;
        if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed");
        if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed");
        if(!prefix){
            // every (demo_delay+1) frames: refresh FPS and rotate averages
            if(count % (demo_delay+1) == 0){
                fps = 1./(get_wall_time() - demo_time);
                demo_time = get_wall_time();
                float *swap = last_avg;
                last_avg  = last_avg2;
                last_avg2 = swap;
                memcpy(last_avg, avg, l.outputs*sizeof(float));
            }
            display_in_thread(0);
        }else{
            // headless mode: save the previous frame under a numbered name
            char name[256];
            sprintf(name, "%s_%08d", prefix, count);
            save_image(buff[(buff_index + 1)%3], name);
        }
        pthread_join(fetch_thread, 0);
        pthread_join(detect_thread, 0);
        ++count;
    }
}
Esempio n. 6
0
void graphicalJoystick(int joy_idx, SDL_Joystick *joy, FILE* fichierKernel){

		const char* original = "FAJA" ;
	char key ;
	CvCapture *capture ;
	IplImage* cap ;
	IplImage* cap_resize ;

	capture = cvCreateCameraCapture(CV_CAP_ANY);
	
	if(!capture){
		printf("! Error while capturing from camera !\n");
		exit(1);
	}	
	
	CvFont cross,faja;

cvInitFont(&cross,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 1,1,0,1,1);
cvInitFont(&faja,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 1,1,0,2,3);

	cap = cvQueryFrame(capture);
	cap_resize = cvCreateImage(cvSize(1366,768), cap->depth, cap->nChannels);

cvNamedWindow(original, CV_WINDOW_NORMAL);
	cvMoveWindow(original, 0, 0);
	cvSetWindowProperty(original, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
        
        int num_axes    = SDL_JoystickNumAxes(joy) -1;
        int num_buttons = SDL_JoystickNumButtons(joy);
        int num_hats    = SDL_JoystickNumHats(joy);
FILE* f = NULL;

        f=fopen("/dev/servoblaster", "w");
        if (f == NULL)
          exit(1);
        int quit = 0;
        SDL_Event event;
        while(!quit)
        {
          SDL_Delay(10);

          bool something_new = false;
          while (SDL_PollEvent(&event)) {
			  
			  cap = cvQueryFrame(capture);
		cvPutText (cap,"+",cvPoint(cap->width/2,cap->height/2), &cross, cvScalar(0,255,0,0));
		cvPutText (cap,"FAJA - Mode manuel",cvPoint(10,30), &faja, cvScalar(255,0,0,0));
		cvResize(cap, cap_resize, CV_INTER_LINEAR);
	

		cvShowImage(original, cap_resize);
            something_new = true;
            switch(event.type)
            {
              case SDL_JOYAXISMOTION:
                assert(event.jaxis.axis < num_axes+1);
                axes[event.jaxis.axis] = event.jaxis.value;
                break;

              case SDL_JOYBUTTONDOWN:
              case SDL_JOYBUTTONUP:
                assert(event.jbutton.button < num_buttons);
                buttons[event.jbutton.button] = event.jbutton.state;
                break;

              case SDL_JOYHATMOTION:
                assert(event.jhat.hat < num_hats);
                hats[event.jhat.hat] = event.jhat.value;
                break;

              case SDL_QUIT:
                quit = 1;
                printf("Recieved interrupt, exiting\n");
                break;

              default:
                fprintf(stderr, "Error: Unhandled event type: %d\n", event.type);
                break;
            }
          }
          if (something_new)
          {
            //clear();
            
            res = 50+(((-1 * (axes[0])+32768)*190)/65535);
                fprintf(f,"1=%i\n",(int)res);
                printf("echo 1=%i > /dev/servoblaster\n", (int) res);
                fflush(f);

                res = 50+(((-1 * (axes[1])+32768)*190)/65535);
                fprintf(f,"0=%i\n", (int)res);
                printf("echo 0=%i > /dev/servoblaster\n", (int) res);
                fflush(f);   
     
            }
          }
        } // while
Esempio n. 7
0
int main(int argc, char **argv) {
    cv::Mat frame;
    cv::Mat orig;
    cv::Mat fore;

    time_t camAdaptationStartTime = time(NULL);
    bool camAdapted = false;

    std::vector <std::vector<cv::Point>> contours;

    EnableCameraAutoAdjust(GetVideoNum(argc, argv));
    cv::VideoCapture cap(GetVideoNum(argc, argv));
    cv::Ptr<cv::BackgroundSubtractor> bgsub;

    cvNamedWindow("Contador de Votos", CV_WINDOW_NORMAL);
    cvSetWindowProperty("Contador de Votos", CV_WND_PROP_AUTOSIZE, CV_WINDOW_AUTOSIZE);
    ScreenSize ss = GetScreenSize();
    cvResizeWindow("Contador de Votos", ss.width, ss.height);

    cap >> frame;

    const int squareSize = 150;
    const int squareMargin = 10;

    cv::Point leftOrigin(squareMargin, squareMargin);
    cv::Point rightOrigin(frame.size().width - squareSize - squareMargin, squareMargin);

    InteractiveObject left(cv::Rect(leftOrigin.x, leftOrigin.y, squareSize, squareSize));
    InteractiveObject right(cv::Rect(rightOrigin.x, rightOrigin.y, squareSize, squareSize));

    cv::Mat yesc = cv::imread("SWC.png", CV_LOAD_IMAGE_COLOR);
    cv::resize(yesc, yesc, cv::Size(squareSize, squareSize));

    cv::Mat yesbw = cv::imread("vader.jpg", CV_LOAD_IMAGE_COLOR);
    cv::resize(yesbw, yesbw, cv::Size(squareSize, squareSize));

    cv::Mat noc = cv::imread("STC.png", CV_LOAD_IMAGE_COLOR);
    cv::resize(noc, noc, cv::Size(squareSize, squareSize));

    cv::Mat nobw = cv::imread("spock.png", CV_LOAD_IMAGE_COLOR);
    cv::resize(nobw, nobw, cv::Size(squareSize, squareSize));

    cv::Mat vote = cv::imread("vote.png", CV_LOAD_IMAGE_COLOR);
    cv::resize(vote,vote, cv::Size(squareSize, squareSize));

   // VideoCapture video("Game.mp4");

    while(true) {
        cap >> frame;
        cv::flip(frame, frame, 1);
        frame.copyTo(orig);

	/*Mat game;
	video >> game;
	cv::resize(game,game, cv::Size(squareSize, squareSize));
	game.copyTo(orig(cv::Rect((rightOrigin.x + leftOrigin.x)/2, (rightOrigin.y + leftOrigin.y)/2 + 2*game.size().height , game.size().width, game.size().height)));*/

        if (camAdapted) {
            cv::blur(frame, frame, cv::Size(3, 3));
            cv::blur(frame, frame, cv::Size(5, 5));
            cv::erode( frame, frame, cv::Mat());
            cv::erode( frame, frame, cv::Mat());

            cv::Mat cropped;
            cv::Rect roi(0, 0, frame.size().width, 150);
            frame(roi).copyTo(cropped);

            cv::Mat dst;
            bgsub->apply(cropped, dst, 0.0001);

            cv::threshold(dst, dst, 230, 255, CV_THRESH_BINARY);
            cv::findContours(dst , contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);


            std::vector<std::vector<cv::Point> >hull( contours.size() );
            for( int i = 0; i < contours.size(); i++ )
                {  cv::convexHull( cv::Mat(contours[i]), hull[i], false ); }

            int counter = 0;

            bool leftActive = false;
            bool rightActive = false;
            for( int i = 0; i< contours.size(); i++ )
            {
               cv::Scalar color( 255, 0, 255);
               if (cv::contourArea(hull[i]) > 600) {
                    cv::drawContours( orig, hull, i, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
                    counter++;
               }
            }

            left.ProcessHulls(hull);
            right.ProcessHulls(hull);

		 std::string text = "VOTE";
                cv::Size txtSz = cv::getTextSize(text, cv::FONT_HERSHEY_DUPLEX, 4, 4, NULL);
                //cv::putText(orig, text, cv::Point(orig.size().width / 2 - txtSz.width /2, orig.size().height - 2* txtSz.height), cv::FONT_HERSHEY_PLAIN, 7, cv::Scalar(0, 255, 255), 4);


		vote.copyTo(orig(cv::Rect((rightOrigin.x + leftOrigin.x)/2, (rightOrigin.y + leftOrigin.y)/2, vote.size().width, vote.size().height)));


            if (!(left.IsActive() && right.IsActive())) {
                char buff[11];
                sprintf(buff, "%02d", left.GetCount());
                cv::putText(orig, buff, cv::Point(40, orig.size().height - 40), cv::FONT_HERSHEY_PLAIN, 4, cv::Scalar(0, 255, 0), 4);


                sprintf(buff, "%02d", right.GetCount());
                cv::Size txtSz = cv::getTextSize(buff, cv::FONT_HERSHEY_PLAIN, 4, 4, NULL);
                cv::putText(orig, buff, cv::Point(orig.size().width - txtSz.width - 40, orig.size().height - 40), cv::FONT_HERSHEY_PLAIN, 4, cv::Scalar(0, 255, 0), 4);



                left.Draw(orig);
                right.Draw(orig);

                if (right.IsCounted()) {
                    yesc.copyTo(orig(cv::Rect(rightOrigin.x, rightOrigin.y, yesc.size().width, yesc.size().height)));
                }
                else {
                    yesbw.copyTo(orig(cv::Rect(rightOrigin.x, rightOrigin.y, yesbw.size().width, yesbw.size().height)));
                }

                if (left.IsCounted()) {
                    noc.copyTo(orig(cv::Rect(leftOrigin.x, leftOrigin.y, noc.size().width, noc.size().height)));
                }
                else {
                    nobw.copyTo(orig(cv::Rect(leftOrigin.x, leftOrigin.y, nobw.size().width, nobw.size().height)));

                }

		int totalVotes = right.GetCount() + left.GetCount();
		std::string out = "FotosVote/votacao" + std::to_string(totalVotes) + ".png";

		imwrite(out, orig);

            }
            else {
                left.Deactivate();
                right.Deactivate();

               /* std::string text = "Fraude!";
                cv::Size txtSz = cv::getTextSize(text, cv::FONT_HERSHEY_PLAIN, 4, 4, NULL);
                cv::putText(orig, text, cv::Point(orig.size().width / 2 - txtSz.width / 2, orig.size().height /2), cv::FONT_HERSHEY_PLAIN, 4, cv::Scalar(0, 0, 255), 4);*/
            }
        }
        else {
            if ((time(NULL) - camAdaptationStartTime) > ADAPTATION_TIME_SEC) {
                    camAdapted = true;
                    bgsub = cv::createBackgroundSubtractorMOG2();
                    DisableCameraAutoAdjust(GetVideoNum(argc, argv));
            }
            else {
                std::string text = "Configurando...";
                cv::Size txtSz = cv::getTextSize(text, cv::FONT_HERSHEY_PLAIN, 4, 4, NULL);
                cv::putText(orig, text, cv::Point(orig.size().width / 2 - txtSz.width / 2, orig.size().height /2 - 2* txtSz.height), cv::FONT_HERSHEY_PLAIN, 4, cv::Scalar(0, 0, 255), 4);

                char buff[3];
                sprintf(buff, "%d", ADAPTATION_TIME_SEC - abs(time(NULL) - camAdaptationStartTime));
                txtSz = cv::getTextSize(buff, cv::FONT_HERSHEY_PLAIN, 4, 4, NULL);
                cv::putText(orig, buff, cv::Point(orig.size().width / 2 - txtSz.width / 2, orig.size().height /2 ), cv::FONT_HERSHEY_PLAIN, 4, cv::Scalar(0, 0, 255), 4);
            }
        }

        imshow("Contador de Votos", orig);

        int key = cv::waitKey(30);
        if ((key & 0xFF) == 27) {
                exit(0);
        }
        else if ((key & 0xFF) == ' '){
                camAdapted = false;
                EnableCameraAutoAdjust(GetVideoNum(argc, argv));
                camAdaptationStartTime = time(NULL);
        }
        else if ((key & 0xFF) == 'c') {
                imwrite("out.png", orig);
                system("python sendmail.py");
        }



    }

    return 0;
}
Esempio n. 8
0
// Kinect touch-surface entry point: starts the depth/RGB streams, calibrates
// the projection surface, then tracks fingertip positions each frame, feeds
// them to the on-screen menu, and renders four debug windows (fingers,
// surface, rgb, depth).  ESC stops the Kinect loop.
int main (void)
{
  MyFreenectDevice * freenect;
  Freenect::Freenect freeNect;
  IplImage * tmp2 = cvCreateImage(cvSize(640, 480), 8, 3);
  IplImage * tmp = cvCreateImage(cvSize(800, 600), 8, 3);
  CvFont font;
  Menu * menu = new Menu(5);
  cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 5);
  freenect = &freeNect.createDevice<MyFreenectDevice>(0);

  cvNamedWindow("fingers",  CV_WINDOW_NORMAL);
  cvSetWindowProperty("fingers", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
  cvNamedWindow("surface");
  cvResizeWindow("fingers", 800, 600);
  // blank the fingers canvas (the original repeated this call; once suffices)
  cvSet(tmp, CV_RGB(255, 255, 255));
  cvMoveWindow("fingers", 0, 1050);
  cvMoveWindow("surface", 0, 0);
  cvShowImage("fingers", tmp);
  cvNamedWindow("rgb");
  cvMoveWindow("rgb", 0 , 480);
  cvNamedWindow("depth");
  cvMoveWindow("depth", 640 , 480);
  freenect->startStream();
  freenect->setTilt(0);
  freenect->setThresh(true, 0, 700);
  int waitTime = 0;

  while (freenect->isKinectRunning())
    {
      if (freenect->isDepthReady() && freenect->isRgbReady())
	{
	  cvSet(tmp, CV_RGB(255, 255, 255));
	  cvZero(tmp2);
	  IplImage * img =  (IplImage*)cvClone(freenect->fetchKinectRgbFrame());
	  
	  if (freenect->isCalibrated() == false && !freenect->getSurface())
	    {
	      // automatic calibration path: let the stream settle first
	      sleep(1);
	      freenect->calibrate();
	    }
	  else if (freenect->isCalibrated() == false)
	    {
	      // manual-calibration notice, shown for ~30 frames
	      if (waitTime < 30)
		{
		  cvPutText(tmp, "Initiating manual calibration",
			    cvPoint(250, 200), &font, CV_RGB(255, 0 , 0));
		  cvPutText(tmp, "due to bad lighting conditions", 
			    cvPoint(250, 300), &font, CV_RGB(255, 0 , 0));
		  ++waitTime;
		}
	    }
	  else
	    {
	      // calibrated: draw tracked fingertips on the black canvas and
	      // the raw depth frame, then let the menu interpret gestures
	      cvSet(tmp, CV_RGB(0, 0, 0));
	      freenect->calcFingerPositions();
	      std::list<Finger> fList = freenect->getFingerList();
	      std::list<CvPoint> fListRaw = freenect->getRawFingerList();

	      for (std::list<CvPoint>::iterator it = fListRaw.begin() ; it != fListRaw.end() ; ++it)
	      	{
		  cvCircle(freenect->fetchKinectDepthFrame(), cvPoint(it->x, it->y),
			   10, CV_RGB(0, 255, 0), CV_FILLED);
	      	}
	      std::vector<CvPoint> twoFirst;
	      for (std::list<Finger>::iterator it = fList.begin() ; it != fList.end() ;)
	      	{
		  cvCircle(tmp, cvPoint(it->x, it->y), 10, CV_RGB(255, 0, 0), CV_FILLED);
		  ++it;
		}
	      menu->interpretGesture(fList);
	    }
	  menu->drawMenu(tmp);
	  cvShowImage("fingers", tmp);
	  cvShowImage("surface", tmp2);
	  cvShowImage("rgb", freenect->fetchKinectRgbFrame());
	  cvShowImage("depth", freenect->fetchKinectDepthFrame());
	  cvReleaseImage(&img);
	}
      freenect->update();
      int k = freenect->getKey();
      if (k == 27)
	freenect->setRunning(false);
    }
  freenect->stopStream();
  cvDestroyAllWindows();
  // BUGFIX: release heap resources before terminating (the original leaked
  // the menu and both scratch canvases).
  delete menu;
  cvReleaseImage(&tmp);
  cvReleaseImage(&tmp2);
  exit(0);
  return 0; // unreachable; kept to satisfy the compiler
}
Esempio n. 9
0
void WebcamHandler::run()
{

	// initialize webcam
	VideoCapture cap = VideoCapture(0);
	cap.set(CV_CAP_PROP_FRAME_WIDTH, m_frameWidth);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, m_frameHeight);	

	// initialize window
	namedWindow("Settings", CV_WINDOW_AUTOSIZE);
	namedWindow("FaceRepair", CV_WINDOW_NORMAL);
	cvSetWindowProperty("FaceRepair", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);

	cvWaitKey(1000);

	float* hidden;
	float* visible;

	while (m_loop)
	{
		// read frame and continue with next frame if not successfull
		Mat frame;
		cap.retrieve(frame);
		flip(frame, frame, 1);

		// take subimage at faceArea
		Mat subimage;
		frame(*m_faceArea).copyTo(subimage);
		Mat subimageHSV;
		cvtColor(subimage, subimageHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV

		// detect color
		Mat mask;
		inRange(subimageHSV, *m_detectionColorMin, *m_detectionColorMax, mask);
		erode(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
		dilate(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(15, 15)));
		Mat invertedMask = 255 - mask;

		// scale to rbm input size
		Size size = Size(m_edgeLength, m_edgeLength);
		Mat scaledSubimage;	
		resize(subimage, scaledSubimage, size, 0.0, 0.0, INTER_LINEAR);
		Mat scaledMask;
		resize(mask, scaledMask, size, 0.0, 0.0, INTER_NEAREST);
		Mat invertedScaledMask = 255 - scaledMask;

		// calc mean rgb of preserved area
		Scalar bgr = mean(scaledSubimage, invertedScaledMask);

		// set mean rgb at reconstructionArea
		scaledSubimage.setTo(bgr, scaledMask);

		// subimage to normalized float array
		visible = matToNormalizedFloatArrayWithBias(&scaledSubimage);

		// process RBMs
		hidden = m_rbm1000->runHidden(visible, 1);
		delete visible;
		hidden[0] = 1;
		visible = m_rbm1000->runVisible(hidden, 1);
		delete hidden;
		visible[0] = 1;
		resetPreservedArea(&scaledSubimage, &invertedScaledMask, visible);

		hidden = m_rbm1500->runHidden(visible, 1);
		delete visible;
		hidden[0] = 1;
		visible = m_rbm1500->runVisible(hidden, 1);
		delete hidden;
		visible[0] = 1;
		resetPreservedArea(&scaledSubimage, &invertedScaledMask, visible);

		hidden = m_rbm2000->runHidden(visible, 1);
		delete visible;
		hidden[0] = 1;
		visible = m_rbm2000->runVisible(hidden, 1);
		delete hidden;

		// normalized float array to subimage
		normalizedFloatArrayToMatWithoutBias(visible, &scaledSubimage);

		// scale to original faceArea size
		Mat result;
		size = Size(m_faceArea->width, m_faceArea->height);
		resize(scaledSubimage, result, size, 0.0, 0.0, INTER_CUBIC);

		// reset pixels of preserved area in native resolution
		subimage.copyTo(result, invertedMask);

		// create fullscreen image
		Mat fs;
		frame.copyTo(fs);
		result.copyTo(fs(*m_faceArea));
		flip(fs, fs, 1);
		
		// maybe not necessary
		//result.copyTo(frame(*m_faceArea));
		
		// paint visualizations for settings image
		rectangle(frame, *m_faceArea, Scalar(0, 255, 0));
		Point* eyePositions = calculateEyePositions(m_faceArea, m_relativeEyePositionX, m_relativeEyePositionY);
		circle(frame, eyePositions[0], 4, Scalar(255, 255, 0));
		circle(frame, eyePositions[1], 4, Scalar(255, 255, 0));
		delete eyePositions;

		// show frames
		imshow("Settings", frame);
		imshow("FaceRepair", fs);
		
		// check keyboard input
		checkKeys();
	}
	// terminate webcam
	cap.release();
}
Esempio n. 10
0
// C++ wrapper for the legacy C API: forwards a window property change
// (e.g. CV_WND_PROP_FULLSCREEN) to cvSetWindowProperty, converting the
// String window name to a C string.
void cv::setWindowProperty(const String& winname, int prop_id, double prop_value)
{
    CV_TRACE_FUNCTION();
    cvSetWindowProperty( winname.c_str(), prop_id, prop_value);
}
Esempio n. 11
0
/*******************************************************************************************************************//**
 * @brief Program entry point
 *
 * Handles image processing and display of annotated results
 *
 * @param[in] argc command line argument count
 * @param[in] argv command line argument vector
 * @return program exit status
 * @author Christopher D. McMurrough
 **********************************************************************************************************************/
int main(int argc, char** argv)
{
    // validate and parse the command line arguments
    // display_mode: 0 = headless, 1 = annotated display, 2 = mirrored display
    int cameraIndex = 0;
    bool displayMode = true;
    bool flipDisplay = false;
    if(argc != NUM_COMNMAND_LINE_ARGUMENTS + 1)
    {
        std::printf("USAGE: <camera_index> <display_mode>\n");
        std::printf("Running with default parameters... \n");
    }
    else
    {
        cameraIndex = atoi(argv[1]);
        displayMode = atoi(argv[2]) > 0;
        flipDisplay = atoi(argv[2]) == 2;
    }

    // initialize the eye camera video capture
    // (currently hard-wired to a test video rather than the camera index)
    //cv::VideoCapture occulography(cameraIndex);
    cv::VideoCapture occulography("pupil_test.mp4");
    // cv::VideoCapture occulography("test.mov");
    if(!occulography.isOpened())
    {
        // BUGFIX: cameraIndex is a signed int, so use %d (was %u, which is
        // undefined behavior for a signed argument)
        std::printf("Unable to initialize camera %d! \n", cameraIndex);
        return 0;
    }
    // configure the capture device (ignored for file-based sources)
    occulography.set(CV_CAP_PROP_FRAME_WIDTH, CAMERA_FRAME_WIDTH);
    occulography.set(CV_CAP_PROP_FRAME_HEIGHT, CAMERA_FRAME_HEIGHT);
    occulography.set(CV_CAP_PROP_FORMAT, CAMERA_FORMAT);
    occulography.set(CV_CAP_PROP_FPS, CAMERA_FPS);
    occulography.set(CV_CAP_PROP_BRIGHTNESS, CAMERA_BRIGHTNESS);
    occulography.set(CV_CAP_PROP_CONTRAST, CAMERA_CONTRAST);
    occulography.set(CV_CAP_PROP_SATURATION, CAMERA_SATURATION);
    occulography.set(CV_CAP_PROP_HUE, CAMERA_HUE);
    occulography.set(CV_CAP_PROP_GAIN, CAMERA_GAIN);
    occulography.set(CV_CAP_PROP_EXPOSURE, CAMERA_EXPOSURE);
    occulography.set(CV_CAP_PROP_CONVERT_RGB, CAMERA_CONVERT_RGB);

    // intialize the display window if necessary
    if(displayMode)
    {
        cvNamedWindow("eyeImage", CV_WINDOW_NORMAL);
        cvSetWindowProperty("eyeImage", CV_WND_PROP_FULLSCREEN, CV_WINDOW_NORMAL);
        cvSetWindowProperty("eyeImage", CV_WND_PROP_AUTOSIZE, CV_WINDOW_NORMAL);
        cvSetWindowProperty("eyeImage", CV_WND_PROP_ASPECTRATIO, CV_WINDOW_KEEPRATIO);
    }

    // create the pupil tracking object
    PupilTracker tracker;
    tracker.setDisplay(displayMode);

    // store the frame data
    cv::Mat eyeImage;
    //struct PupilData result;
    bool trackingSuccess = false;

    // store the time between frames
    // BUGFIX: clock() returns clock_t; storing it in int could truncate
    clock_t frameStartTicks, frameEndTicks, processStartTicks, processEndTicks;
    float processTime, totalTime;

    // process data until program termination
    bool isRunning = true;
    while(isRunning)
    {
        // start the timer
        frameStartTicks = clock();

        // attempt to acquire an image frame
        if(occulography.read(eyeImage))
        {
            // process the image frame
            processStartTicks = clock();
            trackingSuccess = tracker.findPupil(eyeImage);
            processEndTicks = clock();
            processTime = ((float)(processEndTicks - processStartTicks)) / CLOCKS_PER_SEC;

            // warn on tracking failure
            if(!trackingSuccess)
            {
                std::printf("Unable to locate pupil! \n");
            }

            // update the display
            if(displayMode)
            {
                // NOTE: shares pixel data with eyeImage (shallow header),
                // so annotations below also draw into eyeImage
                cv::Mat displayImage(eyeImage);

                // annotate the image if tracking was successful
                if(trackingSuccess)
                {
                    // draw the pupil center and boundary
                    //cvx::cross(displayImage, result.pupil_center, 5, COLOR_RED);
                    cv::ellipse(displayImage, tracker.getEllipseRectangle(), COLOR_RED);

                    // shade the pupil area with an alpha-blended overlay
                    cv::Mat annotation(eyeImage.rows, eyeImage.cols, CV_8UC3, 0.0);
                    cv::ellipse(annotation, tracker.getEllipseRectangle(), COLOR_MAGENTA, -1);
                    const double alpha = 0.7;
                    cv::addWeighted(displayImage, alpha, annotation, 1.0 - alpha, 0.0, displayImage);
                }

                if(flipDisplay)
                {
                    // annotate the image
                    cv::Mat displayFlipped;
                    cv::flip(displayImage, displayFlipped, 1);
                    cv::imshow("eyeImage", displayFlipped);

                    // display the annotated image; 'q' quits
                    isRunning = cv::waitKey(1) != 'q';
                    displayFlipped.release();
                }
                else
                {
                    // display the image; 'q' quits
                    cv::imshow("eyeImage", displayImage);
                    isRunning = cv::waitKey(1) != 'q';
                }

                // release display image
                displayImage.release();
            }
        }
        else
        {
            // rewind the video source and retry (loops file playback)
            std::printf("WARNING: Unable to capture image from source!\n");
            occulography.set(CV_CAP_PROP_POS_FRAMES, 0);
            continue;
        }

        // stop the timer and print the elapsed time
        frameEndTicks = clock();
        totalTime = ((float)(frameEndTicks - frameStartTicks)) / CLOCKS_PER_SEC;
        //std::printf("Processing time (pupil, total) (result x,y): %.4f %.4f - %.2f %.2f\n", processTime, totalTime, tracker.getEllipseCentroid().x, tracker.getEllipseCentroid().y);
    }

    // release the video source before exiting
    occulography.release();
}
Esempio n. 12
0
/* Live detection demo: loads the network, opens a video file or camera,
 * then runs capture and detection on two worker threads while displaying
 * (or saving, when prefix is set) the results.
 * Keys: ENTER cycles frame skipping, ESC quits, up/down adjust the
 * detection threshold, right/left adjust the hierarchy threshold.
 * Relies on file-scope globals (net, cap, demo_*, in/det/disp, boxes,
 * probs, avg, fps) shared with fetch_in_thread/detect_in_thread. */
void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, int classes, int frame_skip, char *prefix, float hier, int w, int h, int frames, int fullscreen)
{
    //skip = frame_skip;
    image **alphabet = load_alphabet();
    int delay = frame_skip;
    /* publish parameters through the globals read by the worker threads */
    demo_names = names;
    demo_alphabet = alphabet;
    demo_classes = classes;
    demo_thresh = thresh;
    demo_hier = hier;
    printf("Demo\n");
    net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    set_batch_network(&net, 1);

    /* fixed seed for reproducible box colors etc. */
    srand(2222222);

    /* open video file if given, otherwise the camera (with optional
     * width/height/fps overrides) */
    if(filename){
        printf("video file: %s\n", filename);
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);

        if(w){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
        }
        if(h){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
        }
        if(frames){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FPS, frames);
        }
    }

    if(!cap) error("Couldn't connect to webcam.\n");

    /* allocate prediction-averaging and detection output buffers sized by
     * the network's final layer */
    layer l = net.layers[net.n-1];
    int j;

    avg = (float *) calloc(l.outputs, sizeof(float));
    for(j = 0; j < FRAMES; ++j) predictions[j] = (float *) calloc(l.outputs, sizeof(float));
    for(j = 0; j < FRAMES; ++j) images[j] = make_image(1,1,3);

    boxes = (box *)calloc(l.w*l.h*l.n, sizeof(box));
    probs = (float **)calloc(l.w*l.h*l.n, sizeof(float *));
    for(j = 0; j < l.w*l.h*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float));

    pthread_t fetch_thread;
    pthread_t detect_thread;

    /* prime the fetch/detect pipeline so disp holds a detected frame
     * before the display loop starts */
    fetch_in_thread(0);
    det = in;
    det_s = in_s;

    fetch_in_thread(0);
    detect_in_thread(0);
    disp = det;
    det = in;
    det_s = in_s;

    /* warm up the rolling prediction average over FRAMES/2 frames */
    for(j = 0; j < FRAMES/2; ++j){
        fetch_in_thread(0);
        detect_in_thread(0);
        disp = det;
        det = in;
        det_s = in_s;
    }

    int count = 0;
    if(!prefix){
        cvNamedWindow("Demo", CV_WINDOW_NORMAL); 
        if(fullscreen){
            cvSetWindowProperty("Demo", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
        } else {
            cvMoveWindow("Demo", 0, 0);
            cvResizeWindow("Demo", 1352, 1013);
        }
    }

    double before = get_wall_time();

    while(1){
        ++count;
        if(1){
            /* overlap the next fetch and detect with this frame's display */
            if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed");
            if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed");

            if(!prefix){
                show_image(disp, "Demo");
                int c = cvWaitKey(1);
		if (c != -1) c = c%256;
                if (c == 10){
                    /* ENTER: cycle frame skip 0 -> 60 -> 4 -> 0 */
                    if(frame_skip == 0) frame_skip = 60;
                    else if(frame_skip == 4) frame_skip = 0;
                    else if(frame_skip == 60) frame_skip = 4;   
                    else frame_skip = 0;
                } else if (c == 27) {
                    /* ESC: quit.
                     * NOTE(review): returns without joining the just-spawned
                     * threads or freeing the capture/buffers — acceptable if
                     * the process exits right after, but worth confirming. */
                    return;
                } else if (c == 82) {
                    demo_thresh += .02;
                } else if (c == 84) {
                    demo_thresh -= .02;
                    if(demo_thresh <= .02) demo_thresh = .02;
                } else if (c == 83) {
                    demo_hier += .02;
                } else if (c == 81) {
                    demo_hier -= .02;
                    if(demo_hier <= .0) demo_hier = .0;
                }
            }else{
                /* headless mode: save each annotated frame to prefix_NNNNNNNN */
                char buff[256];
                sprintf(buff, "%s_%08d", prefix, count);
                save_image(disp, buff);
            }

            pthread_join(fetch_thread, 0);
            pthread_join(detect_thread, 0);

            /* rotate the image pipeline: displayed -> detected -> fetched */
            if(delay == 0){
                free_image(disp);
                disp  = det;
            }
            det   = in;
            det_s = in_s;
        }else {
            /* single-threaded fallback path (currently disabled by if(1)) */
            fetch_in_thread(0);
            det   = in;
            det_s = in_s;
            detect_in_thread(0);
            if(delay == 0) {
                free_image(disp);
                disp = det;
            }
            show_image(disp, "Demo");
            cvWaitKey(1);
        }
        /* update the FPS estimate once per frame-skip window */
        --delay;
        if(delay < 0){
            delay = frame_skip;

            double after = get_wall_time();
            float curr = 1./(after - before);
            fps = curr;
            before = after;
        }
    }
}
// Planetarium wall renderer: renders a star catalog for a fixed screen
// geometry and lets the user slew the view with the arrow keys
// (X11 key codes 65361-65364), toggle fullscreen with 'f' (102), and
// quit with ESC (27).
int main() {
    cv::namedWindow("Planetarium", cv::WINDOW_NORMAL);

    planetarium wall;

    wall.psf_spread_size = 8;

    // physical screen geometry (meters) used to map sky to pixels
    wall.screen_distance = 5.954;
    wall.screen_width = 1920;
    wall.screen_height = 1080;
    wall.screen_horizontal_pixel_size = 2.364 / wall.screen_width;
    wall.screen_vertical_pixel_size = 1.335 / wall.screen_height;

    // Orion
    wall.attitude_ra = 84*M_PI/180;
    wall.attitude_de = 2*M_PI/180;

    // Juno
    // wall.attitude_ra = -2.885402;
    // wall.attitude_de = 0.133802;
    // Twist: 0.054906

    // photometric calibration: reference intensity/magnitude and PSF width
    wall.I_ref = 1.0;
    wall.m_ref = 6.0;
    wall.sigma_0 = 1.0;

    wall.camera_x_offset = 0.0;
    wall.camera_y_offset = 0.0;

    // Load star catalog
    cv::Mat catalog = load_catalog("../hip6.tsv");

    cv::Mat image = wall.render(catalog);

    // BUGFIX: display the initial render immediately; previously imshow
    // was only reached at the end of the loop body, so the window stayed
    // blank until after the first key poll.
    cv::imshow("Planetarium", image);

    int k;
    while((k = cvWaitKey(30)) != 27) {
        if (k >= 65361 and k <= 65364) {
            // arrow keys slew the boresight by one degree per press
            // Left
            if (k == 65361) {
                wall.attitude_ra += 1*M_PI/180;
            }
            // Right
            else if (k == 65363) {
                wall.attitude_ra -= 1*M_PI/180;
            }
            // Up
            else if (k == 65362) {
                wall.attitude_de += 1*M_PI/180;
            }
            // Down
            else if (k == 65364) {
                wall.attitude_de -= 1*M_PI/180;
            }
            std::cout << "Attitude: Right Ascension: " << 180/M_PI * wall.attitude_ra << std::endl;
            std::cout << "          Declination    : " << 180/M_PI * wall.attitude_de << std::endl;
            image = wall.render(catalog);
        } else if (k == 102) {
            // 'f': switch to fullscreen via the legacy C API
            cvSetWindowProperty("Planetarium", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN );
        }

        cv::imshow("Planetarium", image);
    }

    return 0;
}
Esempio n. 14
0
/**

	Grabs frames from the webcam and performs the color-tracking
	treatment, delegating the per-step work to the other helper
	functions (hsvConvert, ColorTracking, barycenter_calculation, ...).

**/
void imageTreatement() {
	/*
		Variable declarations
	*/
	int i, j ;
	char key = 0 ;  /* BUGFIX: was uninitialized; the loop condition read it before any assignment */
	color_pixel_hsv color ;
	CvCapture *capture ;
	//IplImage* cap = cvLoadImage("imagetest.jpg", CV_LOAD_IMAGE_UNCHANGED);
	IplImage* cap ;
	IplImage* cap_resize ;
	IplImage* hsv ;

	coordonnees center ;
	coordonnees barycenter ;
	center.x = center.y = barycenter.x = barycenter.y = 0 ;
	int *barycenter_x ;
	int *barycenter_y ;
	int pixel_tracked_number = 0 ;

	uchar pixel_h ;
	uchar pixel_s ;
	uchar pixel_v ;

	int tolerance_h = 20 ;
	int tolerance_s = 20 ;

	/*
		Capture from the webcam; print a message and bail out if the
		capture cannot be opened.
	*/
	capture = cvCreateCameraCapture(CV_CAP_ANY);

	if(!capture){
		printf("! Error while capturing from camera !\n");
		exit(1);
	}

	/*
		Grab one frame up front to learn the image format, so the
		lower-resolution working image can be allocated to match.
	*/
	cap = cvQueryFrame(capture);
	cap_resize = cvCreateImage(cvSize(320, 240), cap->depth, cap->nChannels);

	center_calcul(cap_resize, &center);

	//printf("Width image = %d, Height image = %d, Centre théorique = [%d][%d], Centre calculé = [%d][%d]\n", cap_resize->width, cap_resize->height, (cap_resize->width)/2, (cap_resize->height)/2, center.x, center.y);
	//printf("Width = %d, Height = %d\n", cap_resize->width ,cap_resize->height);*/

	/*
		Conversion of the original (BGR) image to HSV happens inside the
		loop below.
	*/
	//hsv = hsvConvert(cap);

	/*
		Window declarations and creation.
	*/
	const char* original = "Image Originale" ;
	const char* hsv_window = "Image HSV" ;

	cvNamedWindow(original, CV_WINDOW_NORMAL);
	cvMoveWindow(original, 0, 0);
	cvSetWindowProperty(original, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
	cvNamedWindow(hsv_window, CV_WINDOW_AUTOSIZE);

	cvCreateTrackbar("Hue Tolerance", "Image Originale", &tolerance_h, 125, NULL);
	cvCreateTrackbar("Saturation Tolerance", "Image Originale", &tolerance_s, 125, NULL);

	// Uncomment to print the image matrix to the terminal.
	//showMatImage(hsv);

	/*
		Frame processing loop; 'q'/'Q' quits.
	*/
	while(key != 'q' && key != 'Q') {

		pixel_tracked_number = 0 ;

		// Allocate the (zero-initialized) barycenter histogram arrays.
		barycenter_x = (int*)calloc(cap->width, sizeof(int));
		barycenter_y = (int*)calloc(cap->height, sizeof(int));

		// Refresh the frame, downscale it, and convert it to HSV.
		cap = cvQueryFrame(capture);
		cvResize(cap, cap_resize, CV_INTER_LINEAR);
		hsv = hsvConvert(cap_resize);
		// NOTE(review): if hsvConvert allocates a new image each call, it
		// is leaked every iteration — consider releasing it at the end of
		// the loop body or reusing a preallocated buffer.

		// Point the "color" structure at the current images.
		color.cam = cap_resize ;
		color.hsv = hsv ;

		// Mouse-click handler (re-registered each frame; once would do).
		cvSetMouseCallback(original, mouseEvent, &color);

		// Walk the whole image; i is the byte offset of the pixel within
		// its row (3 channels per pixel), j is the row index.
		for (i = 0 ; i < (hsv->width)*3 ; i=i+3) {
			for (j = 0 ; j < hsv->height ; j++) {

				// H, S, V bytes of the pixel at column i/3, row j.
				// BUGFIX: the offset was computed as i*3 + j*widthStep,
				// but i already advances in byte steps of 3, so the extra
				// *3 read out of bounds past column width/3.
				pixel_h = (uchar)(hsv->imageData[(i+(j*hsv->widthStep))]) ;
				pixel_s = (uchar)(hsv->imageData[(i+(j*hsv->widthStep))+1]) ;
				pixel_v = (uchar)(hsv->imageData[(i+(j*hsv->widthStep))+2]) ;

				// Track pixels matching the selected color.
				if(ColorTracking(color, (i+(j*cap_resize->widthStep)), pixel_h, pixel_s, pixel_v, tolerance_h, tolerance_s)){
					//printf("y = %d, x = %d\n", i/((cap->width)*3), i%((cap->width)*3));

					barycenter_y[j]++ ;

					barycenter_x[i/3]++ ;

					pixel_tracked_number++ ;
				}
			}
		}

		// Compute the barycenter of the tracked pixels.
		barycenter = barycenter_calculation(barycenter_x, cap->width, barycenter_y, cap->height, pixel_tracked_number);

		CvPoint p ;
		p.x = barycenter.x ;
		p.y = barycenter.y ;

		// Mark the barycenter with a filled blue dot.
		cvCircle(cap_resize, p, 5, CV_RGB(0,0,255), -1, 8, 0);

		// Show both views in their windows.
		cvShowImage(original, cap_resize);
		cvShowImage(hsv_window, hsv);

		// Free the barycenter histogram arrays.
		free(barycenter_x);
		free(barycenter_y);

		key = cvWaitKey(10);
	}

	// BUGFIX: release the capture and windows before returning (they were
	// previously left open/leaked).
	cvReleaseCapture(&capture);
	cvDestroyAllWindows();
}