Example #1
    /******************************************************************************
    Description.: this thread worker grabs a frame and copies it to the global buffer
    Input Value.: unused
    Return Value: unused, always NULL
     ******************************************************************************/
    void *cam_thread(void *arg) {
        
        g_settings.init();
        setCameraExposure();
        
        CVideoFrame* pFrame = NULL;

#ifndef TEST_USE_JPEGS_NOT_CAMERA 
        int width = VIEW_PIXEL_X_WIDTH;
        int height = VIEW_PIXEL_Y_HEIGHT;
        IplImage * img = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3); // OpenCV image (unused in this example)
#endif

        frameGrinder.init();

#ifdef TEST_USE_JPEGS_NOT_CAMERA 
        std::string sBasePath = "/home/";
        sBasePath += HOME_NAME;
        std::string sPath = sBasePath;
        sPath += "/0243-20150125-22-21-46.jpg";
        //sPath += "/0007-20150125-22-36-25.jpg";  
        cv::Mat frame1 = cv::imread(sPath.c_str(), CV_LOAD_IMAGE_COLOR);
        if (frame1.empty()) {
            dbgMsg_s("Failed to read image data from a file1\n");
        }

        sPath = sBasePath;
        sPath += "/0243-20150125-22-21-46.jpg";
        //sPath += "/0007-20150125-22-36-25.jpg";  
        cv::Mat frame2 = cv::imread(sPath.c_str(), CV_LOAD_IMAGE_COLOR);
        if (frame2.empty()) {
            dbgMsg_s("Failed to read image data from a file2\n");
        }
        bool toggle = false;
#endif

        context *pcontext = (context*) arg;
        pglobal = pcontext->pglobal;

        /* set cleanup handler to clean up allocated resources */
        pthread_cleanup_push(cam_cleanup, pcontext);

        while (!pglobal->stop) {
            while (pcontext->videoIn->streamingState == STREAMING_PAUSED) {
                usleep(1); // FIXME: busy-waiting; a condition variable would be better
            }

#ifdef TEST_USE_JPEGS_NOT_CAMERA 
            if (frameGrinder.safeGetFreeFrame(&pFrame)) {
                if (toggle) {
                    pFrame->m_frame = frame1;
                } else {
                    pFrame->m_frame = frame2;
                }
                toggle = (!toggle);
                if (!pFrame->m_frame.empty()) {
                    frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT);
                } else {
                    dbgMsg_s("Frame is empty\n");
                    frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_FREE);
                }
                frameGrinder.m_testMonitor.m_nTasksDone[CTestMonitor::TASK_DONE_CAMERA]++;
            }

#else
            /* grab a frame */
            if (uvcGrab(pcontext->videoIn) < 0) {
                IPRINT("Error grabbing frames\n");
                exit(EXIT_FAILURE);
            }

            DBG("received frame of size: %d from plugin: %d\n", pcontext->videoIn->buf.bytesused, pcontext->id);

            /*
             * Workaround for broken, corrupted frames:
             * Under low light conditions corrupted frames may get captured.
             * The good thing is such frames are quite small compared to the regular pictures.
             * For example, a VGA (640x480) webcam picture is normally at least 8 kB;
             * corrupted frames are smaller.
             */
            if (pcontext->videoIn->buf.bytesused < minimum_size) {
                DBG("dropping too small frame, assuming it as broken\n");
                continue;
            }

            if (g_settings.isDynamicSettingsEnabled())
            {
                g_settings.getValueFromFile(CSetting::SETTING_EXPOSURE);
            }
            if(g_settings.isValueChanged(CSetting::SETTING_EXPOSURE))
            {
                setCameraExposure();
            }

#ifdef NO_CV_JUST_STREAM_THE_CAMERA

            /* copy JPG picture to global buffer */
            pthread_mutex_lock(&pglobal->in[pcontext->id].db);

            /*
             * If capturing in YUV mode convert to JPEG now.
             * This compression requires many CPU cycles, so try to avoid YUV format.
             * Getting JPEGs straight from the webcam is one of the major advantages of
             * Linux-UVC compatible devices.
             */
            if (pcontext->videoIn->formatIn == V4L2_PIX_FMT_YUYV) {
                DBG("compressing frame from input: %d\n", (int) pcontext->id);
                pglobal->in[pcontext->id].size = compress_yuyv_to_jpeg(pcontext->videoIn, pglobal->in[pcontext->id].buf, pcontext->videoIn->framesizeIn, gquality);
            } else {
                DBG("copying frame from input: %d\n", (int) pcontext->id);
                pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf, pcontext->videoIn->tmpbuffer, pcontext->videoIn->buf.bytesused);
            }

            /* copy this frame's timestamp to user space */
            pglobal->in[pcontext->id].timestamp = pcontext->videoIn->buf.timestamp;

            /* signal fresh_frame */
            pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update);
            pthread_mutex_unlock(&pglobal->in[pcontext->id].db);
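
            /*
             * Illustrative sketch (not from this example): a consumer of the
             * global buffer typically waits on the same condition variable:
             *
             *     pthread_mutex_lock(&pglobal->in[id].db);
             *     pthread_cond_wait(&pglobal->in[id].db_update, &pglobal->in[id].db);
             *     // ... copy pglobal->in[id].buf using pglobal->in[id].size ...
             *     pthread_mutex_unlock(&pglobal->in[id].db);
             */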


#else // #ifndef NO_CV_JUST_STREAM_THE_CAMERA

            if (frameGrinder.safeGetFreeFrame(&pFrame)) {
                std::vector<uchar> vectordata(pcontext->videoIn->tmpbuffer, pcontext->videoIn->tmpbuffer + (height * width));
                cv::Mat data_mat(vectordata, false);
                cv::Mat image(cv::imdecode(data_mat, 1)); //put 0 if you want greyscale
                pFrame->m_frame = image;
                if (!pFrame->m_frame.empty()) {
                    frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT);
                } else {
                    dbgMsg_s("Frame is empty\n");
                    frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_FREE);
                }
                frameGrinder.m_testMonitor.m_nTasksDone[CTestMonitor::TASK_DONE_CAMERA]++;
            }

#endif  // #ifndef NO_CV_JUST_STREAM_THE_CAMERA

#endif   // TEST_USE_JPEGS_NOT_CAMERA
        }

        DBG("leaving input thread, calling cleanup function now\n");
        pthread_cleanup_pop(1);

        return NULL;
    }
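
The call to pthread_cleanup_push above registers cam_cleanup (not shown in this
example) so that resources are released whether the loop exits normally or the
thread is cancelled; pthread_cleanup_pop(1) both unregisters and runs the
handler. A minimal, self-contained sketch of that push/pop pairing (the names
here are illustrative, not taken from this project):

    #include <pthread.h>
    #include <cstdio>
    #include <cstdlib>

    // Runs on cancellation, on pthread_exit(), or at pthread_cleanup_pop(1).
    static void cleanup(void *arg) {
        std::free(arg);
        std::puts("buffer released");
    }

    static void *worker(void *) {
        void *buf = std::malloc(4096);
        pthread_cleanup_push(cleanup, buf); // register the handler
        // ... grab and process frames here ...
        pthread_cleanup_pop(1);             // pop and execute the handler
        return NULL;
    }

    int main() {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        return 0;
    }
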
void CUpperGoalDetector::detectBlobs(CVideoFrame * pFrame, CFrameGrinder* pFrameGrinder)
{
    try
    {
        static struct timespec timeLastCameraFrame = {0};
        static struct timespec timeNow = {0};
        static cv::Scalar lowerBounds = cv::Scalar(79, 0, 150);
        static cv::Scalar upperBounds = cv::Scalar(96, 255, 250);
        
        cv::Mat img_hsv, img_blur, goal_blob;
        static int iCount = 0;
        
        int timeSinceLastCameraFrameMilliseconds = (int) CTestMonitor::getDeltaTimeMilliseconds(
                timeLastCameraFrame,
                pFrame->m_timeAddedToQueue[(int) CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT]);
        timeLastCameraFrame = pFrame->m_timeAddedToQueue[(int) CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT];

        // RGB is a flawed way to filter on color because brightness is mixed
        // into all three channels. Not so with HSV, where hue and saturation
        // are kept separate from value (brightness).
        // OpenCV has a handy conversion from BGR to HSV.
        cv::cvtColor(pFrame->m_frame, img_hsv, CV_BGR2HSV);
        
        cv::GaussianBlur(img_hsv, img_blur, cv::Size(5,5),1.5);

        // Look for the green hue emitted by the LED halo
        if(g_settings.isDynamicSettingsEnabled())
        {
             g_settings.getValueFromFile(CSetting::SETTING_FILTER_HUE_LOWER_BOUND);
             g_settings.getValueFromFile(CSetting::SETTING_FILTER_HUE_UPPER_BOUND);
        }
        if(g_settings.isValueChanged(CSetting::SETTING_FILTER_HUE_LOWER_BOUND))
        {
            lowerBounds = cv::Scalar(g_settings.getSetting(CSetting::SETTING_FILTER_HUE_LOWER_BOUND),0,150);
        }
        if(g_settings.isValueChanged(CSetting::SETTING_FILTER_HUE_UPPER_BOUND))
        {
            upperBounds = cv::Scalar(g_settings.getSetting(CSetting::SETTING_FILTER_HUE_UPPER_BOUND),255,250);
        }

        // Find the bright response from the retro-reflective tape
        cv::inRange(img_blur, lowerBounds, upperBounds, goal_blob);
        pFrame->m_filteredFrame = goal_blob.clone();
            
        iCount++;
        if ((iCount % 17) == 0) // save every 17th filtered frame
        {
            pFrameGrinder->m_testMonitor.saveFrameToJpeg(pFrame->m_filteredFrame);
        }

        // Find the contours. findContours modifies its input, which is why
        // goal_blob was cloned into m_filteredFrame above.
        std::vector<std::vector<cv::Point> > goalContours;
        cv::findContours(goal_blob, goalContours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
        
        CUpperGoalRectangle upperGoalRectangle;
        float upperGoalAzimuthDegrees = 0.0f;
        float distanceToUpperGoalInches = 0.0f;
        bool isUpperGoalFound = filterContours(goalContours, pFrame->m_frame.rows, pFrame->m_frame.cols,
                upperGoalRectangle, upperGoalAzimuthDegrees, distanceToUpperGoalInches);
       
        CTestMonitor::getTicks(&timeNow);
        int timeLatencyThisCameraFrameMilliseconds = (int) CTestMonitor::getDeltaTimeMilliseconds(
                pFrame->m_timeAddedToQueue[(int) CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT],
                timeNow);

        pFrame->m_targetInfo.updateTargetInfo(
                timeSinceLastCameraFrameMilliseconds, timeLatencyThisCameraFrameMilliseconds, 
                isUpperGoalFound, upperGoalAzimuthDegrees, distanceToUpperGoalInches, upperGoalRectangle.center.x);

        pFrame->updateAnnotationInfo(upperGoalRectangle);

        m_gpioLed.setGreenLED(isUpperGoalFound, pFrame->m_timeRemovedFromQueue[(int) CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT]);
    }
    catch (...)
    {
        // All exceptions are silently discarded.
    }
}
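
The filtering steps in detectBlobs (BGR-to-HSV conversion, Gaussian blur,
inRange threshold, contour extraction) can be exercised on their own. Below is
a minimal sketch using the same default bounds, assuming a BGR test image on
disk ("sample.jpg" is a placeholder path) and the modern OpenCV constant names:

    #include <opencv2/opencv.hpp>
    #include <cstdio>
    #include <vector>

    int main() {
        cv::Mat frame = cv::imread("sample.jpg", cv::IMREAD_COLOR);
        if (frame.empty()) {
            std::fprintf(stderr, "failed to read test image\n");
            return 1;
        }

        cv::Mat hsv, blurred, mask;
        cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);         // separate color from brightness
        cv::GaussianBlur(hsv, blurred, cv::Size(5, 5), 1.5); // suppress pixel noise
        // Hue window around green; the S and V limits reject washed-out pixels
        cv::inRange(blurred, cv::Scalar(79, 0, 150), cv::Scalar(96, 255, 250), mask);

        std::vector<std::vector<cv::Point>> contours;
        cv::findContours(mask, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);
        std::printf("found %zu candidate blobs\n", contours.size());
        return 0;
    }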