Exemplo n.º 1
0
void mono_handler(const lcm_recv_buf_t *rbuf, const char* channel, const lcmt_stereo *msg, void *user) {
    // LCM callback: open the video referenced by the message (if it is not
    // the one already open), seek to the requested frame, and publish that
    // frame over LCM on image_channel.
    //
    // rbuf/channel/user are unused; msg supplies the video number,
    // timestamp and frame number to serve.

    // Switch files only when the message references a different video.
    if (current_video_number != msg->video_number) {

        video_capture.release();

        std::string newfile = GetMonoFilename(msg->timestamp, msg->video_number);

        if (newfile.empty()) {
            return;
        }

        std::cout << "Opening file: " << newfile << std::endl;

        if (!video_capture.open(newfile)) {
            std::cerr << "Failed to open file: " << newfile << std::endl;
            return;
        }
        current_video_number = msg->video_number;
    }

    // Seek to the requested frame; frame_offset compensates for any skew
    // between logged frame numbers and the file's frame indices.
    video_capture.set(CV_CAP_PROP_POS_FRAMES, msg->frame_number + frame_offset);

    cv::Mat frame;

    video_capture >> frame;

    // Guard against a failed read (e.g. seeking past the end of the file).
    // Previously an empty image would have been published.
    if (frame.empty()) {
        std::cerr << "Failed to read frame "
            << msg->frame_number + frame_offset
            << " from video number " << msg->video_number << std::endl;
        return;
    }

    SendImageOverLcm(lcm_, image_channel, frame);

}
Exemplo n.º 2
0
// (Re)initialise the given capture object on the default camera.
// A previously opened stream is released first so the device handle
// is not leaked.
void initVideoStream(cv::VideoCapture &cap)
{
	if (cap.isOpened()) {
		cap.release();
	}
	cap.open(0); // device index 0 == default camera
}
Exemplo n.º 3
0
//-----------------------------------------------------------------------------
// Tear down the camera subsystem: release the global capture device and
// close every OpenCV window.  The legacy VfW disconnect / temp-file
// cleanup below is intentionally left disabled.
void ShutdownCamera (const char sCameraImg[])
{
/*capDriverDisconnect(hgCapWnd);
lprintf("\nDelete temporary file %s\n", sCameraImg);
unlink(sCameraImg);*/

	cam.release();          // stop the capture stream
	cvDestroyAllWindows();  // drop any preview windows
}
Exemplo n.º 4
0
 /// Destructor: ensure the wrapped capture device is closed (RAII-style
 /// cleanup; release() is safe even if the stream was never opened).
 ~VideoCaptureManager()
 {
     cap_.release();
 }
Exemplo n.º 5
0
// Release the global OpenCV resources (capture stream and image buffer)
// before the program shuts down.  The two releases are independent.
void finish_opencv()
{
	capture_img.release();
	capture.release();
}
// Face-tracking control loop: retrieve frames from the global `camera`
// (grabbed by `grabThread`), detect faces with `face_cascade`, and steer
// the spider robot so the face stays horizontally centred.  Runs until
// the global `running` flag is cleared, then joins the grabber thread
// and shuts the hardware down.
int main()
{
    double timeUse;
    struct timeval startTime, stopTime;

    cv::Mat rawImage, grayImage;
    std::vector<cv::Rect> faces;

    init();
    spider_head(45);  // presumably positions the head/camera at 45 -- TODO confirm against spider API

    std::stringstream logStream, outStream;
    float faceX, faceY;       // centre of the first detected face (pixels)
    int8_t rotateDegree;      // last commanded rotation step (only set after a detection)
    uint8_t rotatePwm;        // last commanded rotation PWM (only set after a detection)
    uint8_t lostCounter = 0;  // frames we keep chasing after losing the face
    while(running)
    {
        // Reset the per-iteration log/output buffers.
        logStream.str("");
        logStream << std::fixed << std::setprecision(3);
        outStream.str("");
        outStream << std::fixed << std::setprecision(3);

        gettimeofday(&startTime, NULL);

        // Decode the most recent frame grabbed by the capture thread.
        camera.retrieve(rawImage);

        // Haar cascades expect an equalized grayscale image.
        cv::cvtColor(rawImage, grayImage, cv::COLOR_BGR2GRAY);
        cv::equalizeHist(grayImage, grayImage);

        faces.clear();
        face_cascade.detectMultiScale(grayImage, faces, 1.1,
            2, 0|cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));
        if(faces.empty())
        {
            if(lostCounter != 0)
            {
                // Recently lost the face: keep rotating in the last known
                // direction for a few more frames to try to reacquire it.
                // (rotateDegree/rotatePwm were set by an earlier detection;
                // lostCounter starts at 0, so this branch cannot run before
                // the first detection.)
                lostCounter --;
                spider_rotate_degree(rotateDegree, rotatePwm, NULL, NULL);

                logStream << "Face lost, lost counter: " << static_cast<int>(lostCounter) << ", ";
            }
            else
            {
                spider_move_stop();

                logStream << "No face!";
            }
        }
        else
        {
            lostCounter = 5;
            // Centre of the first detected face (others are ignored).
            faceX = faces[0].x+faces[0].width*0.5;
            faceY = faces[0].y+faces[0].height*0.5;

            logStream << "Get face, size: " << faces.size() << ", ";
            logStream << "coordinate: x " << faceX << " y " << faceY;

            // Pick a rotation direction relative to x == 80, which appears
            // to be the horizontal image centre here -- TODO confirm frame
            // width.  At exactly 80 the previous command is kept.
            if(faceX < 80)
            {
                rotateDegree = -5;
                rotatePwm = 80;
            }
            else if(faceX > 80)
            {
                rotateDegree = 5;
                rotatePwm = 80;
            }

            // Dead zone of +/-10 px: only rotate when clearly off-centre.
            if(faceX < 70 || faceX > 90)
            {
                spider_rotate_degree(rotateDegree, rotatePwm, NULL, NULL);
            }

            //spider_move(1, 55);
        }

        // Throttle the loop to roughly 4 Hz (0.25 s per iteration).
        gettimeofday(&stopTime, NULL);
        timeUse = stopTime.tv_sec - startTime.tv_sec + (stopTime.tv_usec - startTime.tv_usec)/1000000.0;
        if(timeUse < 0.25)
            usleep((0.25 - timeUse) * 1000000);

        outStream << "Time use: " << timeUse << "s, " << logStream.str();
        std::cout << outStream.str() << std::endl;
    }

    // Shutdown: wait for the grabber thread, park the robot, release HW.
    void* result;
    pthread_join(grabThread, &result);

    spider_head(35);
    spider_move_stop();
    spider_rotate_stop();
    spider_close();

    camera.release();

    std::cout << "Program exit!" << std::endl;

    return 0;
}
Exemplo n.º 7
0
// Chilitags demo: grab frames from a camera, detect fiducial tags, and
// draw their outlines and ids until 'q' is pressed or SIGINT arrives.
//
// argv[1], argv[2]: optional capture resolution (width, height);
// argv[3]: optional camera index (default 0).
int main(int argc, char* argv[])
{
    signal(SIGINT, quitFunction);

    // Simple parsing of the parameters related to the image acquisition
    int xRes = 640;
    int yRes = 480;
    int cameraIndex = 0;
    if (argc > 2) {
        xRes = std::atoi(argv[1]);
        yRes = std::atoi(argv[2]);
    }
    if (argc > 3) {
        cameraIndex = std::atoi(argv[3]);
    }

    // The source of input images
    capture.open(cameraIndex);
    if (!capture.isOpened())
    {
        std::cerr << "Unable to initialise video capture." << std::endl;
        return 1;
    }
#ifdef OPENCV3
    capture.set(cv::CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(cv::CAP_PROP_FRAME_HEIGHT, yRes);
#else
    capture.set(CV_CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, yRes);
#endif
    cv::Mat inputImage;

    // The tag detection happens in the Chilitags class.
    chilitags::Chilitags chilitags;

    // The detection is not perfect, so if a tag is not detected during one frame,
    // the tag will shortly disappear, which results in flickering.
    // To address this, Chilitags "cheats" by keeping tags for n frames
    // at the same position. When tags disappear for more than 5 frames,
    // Chilitags actually removes them.
    // Here, we cancel this to show the raw detection results.
    chilitags.setFilter(0, 0.0f);

    cv::namedWindow("DisplayChilitags");
    // Main loop, exiting when 'q is pressed' or SIGINT clears sRunning
    for (; 'q' != (char) cv::waitKey(1) && sRunning; ) {

        // Capture a new image.  Bail out if the device stops delivering
        // frames -- previously a failed read was silently ignored and an
        // empty/stale image was passed to the detector.
        if (!capture.read(inputImage)) {
            std::cerr << "Unable to read a frame from the capture device." << std::endl;
            break;
        }

        // Start measuring the time needed for the detection
        int64 startTime = cv::getTickCount();

        // Detect tags on the current image (and time the detection);
        // The resulting map associates tag ids (between 0 and 1023)
        // to four 2D points corresponding to the corners positions
        // in the picture.
        chilitags::TagCornerMap tags = chilitags.find(inputImage);

        // Measure the processing time needed for the detection
        int64 endTime = cv::getTickCount();
        float processingTime = 1000.0f*((float) endTime - startTime)/cv::getTickFrequency();


        // Now we start using the result of the detection.

        // First, we set up some constants related to the information overlaid
        // on the captured image
        const static cv::Scalar COLOR(255, 0, 255);
        // OpenCv can draw with sub-pixel precision with fixed point coordinates
        static const int SHIFT = 16;
        static const float PRECISION = 1<<SHIFT;

        // We dont want to draw directly on the input image, so we clone it
        cv::Mat outputImage = inputImage.clone();

        // NOTE: `const auto&` binds directly to the map's value_type
        // (std::pair<const int, Quad>); the previous explicit
        // `const std::pair<int, Quad>&` forced a copy of every element
        // on each iteration.
        for (const auto & tag : tags) {

            int id = tag.first;
            // We wrap the corner matrix into a datastructure that allows an
            // easy access to the coordinates
            const cv::Mat_<cv::Point2f> corners(tag.second);

            // We start by drawing the borders of the tag
            for (size_t i = 0; i < 4; ++i) {
                cv::line(
                    outputImage,
                    PRECISION*corners(i),
                    PRECISION*corners((i+1)%4),
#ifdef OPENCV3
                    COLOR, 1, cv::LINE_AA, SHIFT);
#else
                    COLOR, 1, CV_AA, SHIFT);
#endif
            }

            // Other points can be computed from the four corners of the Quad.
            // Chilitags are oriented. It means that the points 0,1,2,3 of
            // the Quad coordinates are consistently the top-left, top-right,
            // bottom-right and bottom-left corners.
            // (i.e. clockwise, starting from top-left)
            // Using this, we can compute (an approximation of) the center of
            // tag.
            cv::Point2f center = 0.5f*(corners(0) + corners(2));
            cv::putText(outputImage, cv::format("%d", id), center,
                        cv::FONT_HERSHEY_SIMPLEX, 0.5f, COLOR);
        }

        // Some stats on the current frame (resolution and processing time)
        cv::putText(outputImage,
                    cv::format("%dx%d %4.0f ms (press q to quit)",
                               outputImage.cols, outputImage.rows,
                               processingTime),
                    cv::Point(32,32),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5f, COLOR);

        // Finally...
        cv::imshow("DisplayChilitags", outputImage);
    }

    cv::destroyWindow("DisplayChilitags");
    capture.release();

    return 0;
}
Exemplo n.º 8
0
// SIGINT handler.  A signal handler may only perform async-signal-safe
// operations, so it merely raises the quit flag; the main loop observes
// the cleared flag, exits, and does the actual cleanup (releasing the
// capture, destroying windows).  The previous version called std::cout
// and cv::VideoCapture::release() here, neither of which is
// async-signal-safe.
void quitFunction(int sig) {
    (void) sig;  // unused; signature fixed by signal()
    sRunning = false;
}
Exemplo n.º 9
0
// Window-close handler: release the capture device and the Win32 video
// handle, close the frame, and terminate the process.
void MyInitFrame::OnCloseWindow(wxCloseEvent& event) {
	cap.release();      // stop the OpenCV capture stream
	CloseHandle(hVideo);  // release the Win32 handle -- assumes hVideo is valid; TODO confirm
	Close(TRUE);        // close the wxWidgets frame
	exit(0);            // hard exit; skips any remaining wx teardown -- NOTE(review): verify this is intended
}