Example #1
/* Fetch a frame (if available) and process it, calling appropriate 
 callbacks when data becomes available. */
void MarkerCapture::tick(){
    IplImage *thresh_frame = NULL;
    CBlobResult blobs;
    
    // Acquire the lock, update the current frame.
    pthread_mutex_lock(&frame_mutex);
    IplImage *raw_frame = cvQueryFrame(camera);
    if(!raw_frame){
        // No frame available from the camera; skip this tick.
        pthread_mutex_unlock(&frame_mutex);
        return;
    }
    // cvQueryFrame's buffer is owned by the capture, so clone it.
    current_frame = cvCloneImage(raw_frame);
    if(color_acquired && current_frame){
        thresh_frame = apply_threshold(current_frame, target_color);
    }else{
        // No target color yet: substitute a blank single-channel image.
        thresh_frame = cvCreateImage(cvGetSize(current_frame), IPL_DEPTH_8U, 1);
    }
    pthread_mutex_unlock(&frame_mutex);
    // Lock released. Done messing with buffers.
    
    if(frame_update_callback){
        (*frame_update_callback)(this, current_frame, thresh_frame);
    }
    if(color_acquired){
        blobs = detect_blobs(thresh_frame, CV_BLOB_SIZE_MIN);
        if(blobs.GetNumBlobs() >= 2){ // need 2 or more blobs for positional fix.
            MarkerPositionEstimate position;
            // fetch the two largest blobs, by area.
            CBlob blob0, blob1;
            blobs.GetNthBlob(CBlobGetArea(), 0, blob0);
            blobs.GetNthBlob(CBlobGetArea(), 1, blob1);
            // perform positional calculations
            position.distance = distance(blob0, blob1);
            position.angle = angle(blob0, blob1);
            position.blob0_center = blob_center(blob0);
            position.blob1_center = blob_center(blob1);
            // call the update handler.
            if(position_update_callback){
                (*position_update_callback)(this, position);
            }
        }
        blobs.ClearBlobs();
    }
    
    pthread_mutex_lock(&frame_mutex);
    cvReleaseImage(&current_frame);
    cvReleaseImage(&thresh_frame);
    pthread_mutex_unlock(&frame_mutex);
    
    clock_t curr_time = clock();
    fps = CLOCKS_PER_SEC/(double)(curr_time - time);
    time = curr_time;
}
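
The helpers apply_threshold, detect_blobs, blob_center, distance, and angle are not shown in this example. Below is a minimal sketch of how detect_blobs and blob_center might look against the cvBlobsLib API used here; the signatures and the interpretation of CV_BLOB_SIZE_MIN as a minimum pixel area are assumptions, not the original implementation.

#include <cv.h>
#include "BlobResult.h"

// Hypothetical sketch: extract blobs from a binary image, dropping small ones.
CBlobResult detect_blobs(IplImage *thresh_frame, int min_area){
    // Pixels with value 0 are treated as background; no mask image.
    CBlobResult blobs(thresh_frame, NULL, 0);
    // Exclude any blob whose area is below min_area pixels.
    blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, min_area);
    return blobs;
}

// Hypothetical sketch: center of a blob's bounding box.
CvPoint blob_center(CBlob &blob){
    return cvPoint((int)((blob.MinX() + blob.MaxX()) / 2),
                   (int)((blob.MinY() + blob.MaxY()) / 2));
}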
Example #2
void BlobDetection::init()
{
    /** Init is called just after construction. */
    try
    {
        initStatusMask();
        // Create a proxy to ALVideoDevice on the robot.
        ALVideoDeviceProxy* camProxy = new ALVideoDeviceProxy(getParentBroker());
        behavoirProxy = new ALBehaviorManagerProxy(getParentBroker());
        ledProxy = new ALLedsProxy(getParentBroker());
        motionProxy = new ALMotionProxy(getParentBroker());

        initLeds();

        // Subscribe a client image, requesting 640x480 (kVGA) resolution and the RGB colorspace at 10 fps.
        const std::string cameraID = camProxy->subscribeCamera("camera_01", 0, AL::kVGA, AL::kRGBColorSpace , 10);

        // Create a proxy to ALMemoryProxy on the robot.
        ALMemoryProxy fMemoryProxy = ALMemoryProxy(getParentBroker());
        fMemoryProxy.subscribeToEvent("FrontTactilTouched", "BlobDetection","onFrontTactilTouched");
        fMemoryProxy.subscribeToEvent("MiddleTactilTouched", "BlobDetection","onMiddleTactilTouched");

        HandOrientation rightOrientationLast = NONE;
        HandOrientation leftOrientationLast = NONE;
        HandOrientation rightOrientationCur = NONE, leftOrientationCur = NONE;

        // stand up
        behavoirProxy->runBehavior(STAND);

        // RECORDING: prepare video recording
        /*
        int size;
        std::string arvFile = std::string("/home/nao/video");

        streamHeader tmpStreamHeader;
        std::vector<streamHeader> streamHeaderVector;
        ALVideo videoFile;

        tmpStreamHeader.width = 640;
        tmpStreamHeader.height = 480;
        tmpStreamHeader.colorSpace = AL::kRGBColorSpace; // Not strictly necessary: in PyUV you choose the colorspace at playback.
        tmpStreamHeader.pixelDepth = 8;
        streamHeaderVector.push_back(tmpStreamHeader);

        std::cout<<"Output arv file properties: "<< streamHeaderVector[0].width <<"x"<< streamHeaderVector[0].height
            <<" Colorspace id:"<< streamHeaderVector[0].colorSpace <<" Pixel depth:"<< streamHeaderVector[0].pixelDepth
            <<std::endl;

        if( !videoFile.recordVideo( arvFile, 0, streamHeaderVector ) ) {
            std::cout<<"Error writing "<< arvFile <<" file."<<std::endl;
            return;
        }
        */

        int j = 0;

        while(1)
        {
            if(touched)
            {
                //j++;
                //Switch LEDs RED OFF, BLUE ON
                if(red_on == 1)
                {
                    ledProxy->off(FACE_LED_RED);
                    red_on = 0;
                }
                if(blue_on == 0)
                {
                    ledProxy->on(FACE_LED_BLUE);
                    blue_on = 1;
                }
                // Fetch the image from the NAO camera we subscribed to. It is in the RGB colorspace.
                ALImage *img_cam = (ALImage*)camProxy->getImageLocal(cameraID);

                // Create an OpenCV Mat header to wrap the Aldebaran ALImage.
                // Only the data pointer is assigned; the pixels are not copied.
                Mat img_hsv = Mat(Size(img_cam->getWidth(), img_cam->getHeight()), CV_8UC3);
                img_hsv.data = (uchar*) img_cam->getData();

                // Convert the RGB image from the camera to an HSV image.
                cvtColor(img_hsv, img_hsv, CV_RGB2HSV);

                // RECORDING: write the HSV-converted frame to the video file
                //videoFile.write((char*) img_hsv.data, size); //video ging hier

                // Get the separate HSV color components of the color input image.
                std::vector<Mat> channels(3);
                split(img_hsv, channels);

                Mat planeH = channels[0];
                Mat planeS = channels[1];
                Mat planeV = channels[2];

                // Detect which pixels in each of the H, S and V channels are probably skin pixels.
                threshold(planeH, planeH, 150, UCHAR_MAX, CV_THRESH_BINARY_INV);//18
                threshold(planeS, planeS, 60, UCHAR_MAX, CV_THRESH_BINARY);//50
                threshold(planeV, planeV, 170, UCHAR_MAX, CV_THRESH_BINARY);//80

                // Combine all 3 thresholded color components, so that an output pixel will only
                // be white if the H, S and V pixels were also white.
                Mat imageSkinPixels = Mat(img_hsv.size(), CV_8UC1);     // Greyscale output image.
                bitwise_and(planeH, planeS, imageSkinPixels);           // imageSkin = H AND S.
                bitwise_and(imageSkinPixels, planeV, imageSkinPixels);  // imageSkin = H AND S AND V.
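                // Net effect: a binary skin mask. A pixel is white only where the original
                // frame had hue <= 150, saturation > 60, and value > 170 (the commented
                // values 18/50/80 above are presumably earlier tunings).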

                // Hand the Mat (C++) over to an IplImage header (C); this is necessary
                // because the blob detection library is written against the old OpenCV C API.
                IplImage ipl_imageSkinPixels = imageSkinPixels;

                // RECORDING: record the video using the C container variable.
                // RECORDING: store the in-memory size of the image for recording purposes.
                //size = img_cam->getSize();
                //videoFile.write((char*) ipl_imageSkinPixels.imageData, size/3);

                // Set up the blob detection.
                CBlobResult blobs = CBlobResult(&ipl_imageSkinPixels, NULL, 0); // Use a black background color.

                // Ignore the blobs whose area is less than minArea.
                blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minBlobArea);

                // ##### Gestures #####
                std::cout << "Number of Blobs: " << blobs.GetNumBlobs() <<endl;
                if(blobs.GetNumBlobs() == 0)
                {
                    //picture empty
                }
                else if(blobs.GetNumBlobs() == 1)
                {
                    //head detected
                    trackHead(getCenterPoint(blobs.GetBlob(0)->GetBoundingBox()).x, getCenterPoint(blobs.GetBlob(0)->GetBoundingBox()).y);

                }
                else if(blobs.GetNumBlobs() == 2 || blobs.GetNumBlobs() == 3)
                {
                    //head + one hand || head + two hands
                    Rect rect[3];
                    int indexHead = -1, indexHandLeft = -1, indexHandRight = -1;

                    //Get Bounding Boxes
                    for(int i = 0; i< blobs.GetNumBlobs(); i++)
                    {
                        rect[i] = blobs.GetBlob(i)->GetBoundingBox();
                    }

                    //Detect Head and Hand indexes
                    if(blobs.GetNumBlobs() == 2)
                    {
                        // head and one hand
                        int indexHand = -1;
                        if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y)
                        {
                            // rect[0] is head
                            indexHead = 0;
                            indexHand = 1;
                        }
                        else
                        {
                            // rect[1] is head
                            indexHead = 1;
                            indexHand = 0;
                        }

                        if(getHandside(rect[indexHead], rect[indexHand]) == LEFT)
                        {
                            // The hand is the left hand.
                            indexHandLeft = indexHand;
                            indexHandRight = -1;
                        }
                        else
                        {
                            // The hand is the right hand.
                            indexHandLeft = -1;
                            indexHandRight = indexHand;
                        }

                    }
                    else
                    {
                        //two hands
                        int indexHand1 = -1;
                        int indexHand2 = -1;
                        if(getCenterPoint(rect[0]).y < getCenterPoint(rect[1]).y && getCenterPoint(rect[0]).y < getCenterPoint(rect[2]).y)
                        {
                            // rect[0] is head
                            indexHead = 0;
                            indexHand1 = 1;
                            indexHand2 = 2;
                        }
                        else if(getCenterPoint(rect[1]).y < getCenterPoint(rect[0]).y && getCenterPoint(rect[1]).y < getCenterPoint(rect[2]).y)
                        {
                            // rect[1] is head
                            indexHead = 1;
                            indexHand1 = 0;
                            indexHand2 = 2;
                        }
                        else
                        {
                            // rect[2] is head
                            indexHead = 2;
                            indexHand1 = 0;
                            indexHand2 = 1;
                        }

                        if(getHandside(rect[indexHead], rect[indexHand1]) == LEFT)
                        {
                            indexHandLeft = indexHand1;
                            indexHandRight = indexHand2;
                        }
                        else
                        {
                            indexHandLeft = indexHand2;
                            indexHandRight = indexHand1;
                        }
                    }

                    // Blobs are detected:
                    // point NAO's head at the detected head blob.
                    trackHead(getCenterPoint(rect[indexHead]).x, getCenterPoint(rect[indexHead]).y);

                    //Get Orientations from Hand rects
                    leftOrientationCur = (indexHandLeft != -1)?getOrientationOfRect(rect[indexHandLeft]):NONE;
                    rightOrientationCur = (indexHandRight != -1)?getOrientationOfRect(rect[indexHandRight]):NONE;

                    //Check Change of Left hand
                    switch(detectHandStateChange(leftOrientationLast, leftOrientationCur))
                    {
                    case PORTRAIT_TO_LANDSCAPE:
                        handleGestures(LEFT_FLIP_DOWN);
                        break;
                    case LANDSCAPE_TO_PORTRAIT:
                        handleGestures(LEFT_FLIP_UP);
                        break;
                    case NOCHANGE:
                        // TODO
                    default:
                        break;
                    }

                    //Check Change of Right hand
                    switch(detectHandStateChange(rightOrientationLast, rightOrientationCur))
                    {
                    case PORTRAIT_TO_LANDSCAPE:
                        handleGestures(RIGHT_FLIP_DOWN);
                        break;
                    case LANDSCAPE_TO_PORTRAIT:
                        handleGestures(RIGHT_FLIP_UP);
                        break;
                    case NOCHANGE:
                        //TODO
                    default:
                        break;
                    }
                }
                else if(blobs.GetNumBlobs() > 3)
                {
                    // More than three blobs: too much information to interpret.
                    std::cout << "too much information" << std::endl;
                }

                leftOrientationLast = leftOrientationCur;
                rightOrientationLast = rightOrientationCur;

                // RECODING: close the video recorder
                //videoFile.closeVideo();

                // Free all the resources.
                camProxy->releaseImage(cameraID);

                //IplImage* p_iplImage = &ipl_imageSkinPixels;
                //cvReleaseImage(&p_iplImage);

                qi::os::sleep(0.5f);
                //sleep(1);
            }
            else
            {
                //Switch LEDs RED ON, BLUE OFF
                if(red_on == 0)
                {
                    ledProxy->on(FACE_LED_RED);
                    red_on = 1;
                    behavoirProxy->runBehavior(STAND);
                }
                if(blue_on == 1)
                {
                    ledProxy->off(FACE_LED_BLUE);
                    blue_on = 0;
                }
            }
        }
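        // Note: this cleanup is unreachable as written, because the while(1)
        // loop above never breaks; a shutdown condition would be needed.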
        camProxy->unsubscribe(cameraID);

    }
    catch (const AL::ALError& e)
    {
        std::cerr << "Caught exception: " << e.what() << std::endl;
        return;
    }
    return;
}
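
Example #2 likewise relies on helpers that are not shown (getCenterPoint, getHandside, getOrientationOfRect, detectHandStateChange). The sketch below reconstructs plausible versions from the way the calls above use them; the enum values and the left/right convention (relative to the camera) are assumptions, not the original code.

#include <opencv2/core/core.hpp>

// Assumed enums, inferred from the switch/compare sites above.
enum HandOrientation { NONE, PORTRAIT, LANDSCAPE };
enum HandSide { LEFT, RIGHT };
enum HandStateChange { NOCHANGE, PORTRAIT_TO_LANDSCAPE, LANDSCAPE_TO_PORTRAIT };

// Center of a bounding box.
cv::Point getCenterPoint(const cv::Rect &r){
    return cv::Point(r.x + r.width / 2, r.y + r.height / 2);
}

// A hand whose bounding box is taller than it is wide reads as "portrait".
HandOrientation getOrientationOfRect(const cv::Rect &r){
    return (r.height >= r.width) ? PORTRAIT : LANDSCAPE;
}

// "Left" here means left of the head's center from the camera's point of view.
HandSide getHandside(const cv::Rect &head, const cv::Rect &hand){
    return (getCenterPoint(hand).x < getCenterPoint(head).x) ? LEFT : RIGHT;
}

// A flip gesture is an orientation transition between two consecutive frames.
HandStateChange detectHandStateChange(HandOrientation last, HandOrientation cur){
    if(last == PORTRAIT && cur == LANDSCAPE) return PORTRAIT_TO_LANDSCAPE;
    if(last == LANDSCAPE && cur == PORTRAIT) return LANDSCAPE_TO_PORTRAIT;
    return NOCHANGE;
}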