Example No. 1
CV_INLINE void CvvImage::Destroy()
{
   cvReleaseImage( &m_img );
}
int main()
{
#ifdef VISP_HAVE_OPENCV
  try {
    vpVideoReader reader;
    reader.setFileName("video-postcard.mpeg");

    vpImage<unsigned char> I;
    reader.acquire(I);

#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
    IplImage * cvI = NULL;
#else
    cv::Mat cvI;
#endif
    vpImageConvert::convert(I, cvI);

    // Display initialisation
    vpDisplayOpenCV d(I, 0, 0, "Klt tracking");
    vpDisplay::display(I);
    vpDisplay::flush(I);

    vpKltOpencv tracker;
    // Set tracker parameters
    tracker.setMaxFeatures(200);
    tracker.setWindowSize(10);
    tracker.setQuality(0.01);
    tracker.setMinDistance(15);
    tracker.setHarrisFreeParameter(0.04);
    tracker.setBlockSize(9);
    tracker.setUseHarris(1);
    tracker.setPyramidLevels(3);

    // Initialise the tracking
    tracker.initTracking(cvI);

    while ( ! reader.end() )
    {
      reader.acquire(I);
      std::cout << "acquire image " << reader.getFrameIndex() << std::endl;
      vpDisplay::display(I);

      vpImageConvert::convert(I, cvI);

      //! [Re-init tracker]
      // Restart the initialization to detect new keypoints
      if (reader.getFrameIndex() == 25) {
        std::cout << "Re initialize the tracker" << std::endl;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020408)
        // Save the previous features
        std::vector<cv::Point2f> prev_features = tracker.getFeatures();

        // Start a new feature detection
        tracker.initTracking(cvI);
        std::vector<cv::Point2f> new_features = tracker.getFeatures();

        // Add previous features if they are not too close to the newly detected ones
        double distance, minDistance_ = tracker.getMinDistance();
        bool is_redundant;
        for (size_t i=0; i < prev_features.size(); i++) {
          // Test whether this previous feature is redundant with one of the newly detected ones
          is_redundant = false;
          for (size_t j=0; j < new_features.size(); j++){
            distance = sqrt(vpMath::sqr(new_features[j].x-prev_features[i].x) + vpMath::sqr(new_features[j].y-prev_features[i].y));
            if(distance < minDistance_){
              is_redundant = true;
              break;
            }
          }
          if(is_redundant){
            continue;
          }
          //std::cout << "Add previous feature with index " << i << std::endl;
          tracker.addFeature(prev_features[i]);
        }
#else
        // Save the previous features
        int prev_nfeatures = tracker.getNbFeatures();
        float x,y;
        long id;
        int j=0;

        CvPoint2D32f *prev_features = (CvPoint2D32f*)cvAlloc(prev_nfeatures*sizeof(CvPoint2D32f));

        for (int i=0; i <prev_nfeatures ; i ++) {
          tracker.getFeature(i, id, x, y);
          prev_features[i].x=x;
          prev_features[i].y=y;
          //printf("prev feature %d: id %d coord: %g %g\n", i, id, x, y);
        }

        // Start a new feature detection
        tracker.initTracking(cvI);
        std::cout << "Detection of " << tracker.getNbFeatures() << " new features" << std::endl;

        // Add previous features if they are not too close to the newly detected ones
        double distance, minDistance_ = tracker.getMinDistance();
        for(int i = tracker.getNbFeatures() ;
            j<prev_nfeatures && i<tracker.getMaxFeatures() ;
            j++){
          // Test whether this previous feature is redundant with one of the newly detected ones
          bool is_redundant = false;
          for(int k=0; k<tracker.getNbFeatures(); k++){
            tracker.getFeature(k,id,x,y);
            //printf("curr feature %d: id %d coord: %g %g\n", k, id, x, y);
            distance = sqrt(vpMath::sqr(x-prev_features[j].x) + vpMath::sqr(y-prev_features[j].y));
            if(distance < minDistance_){
              is_redundant = true;
              break;
            }
          }
          if(is_redundant){
            continue;
          }
          //std::cout << "Add previous feature with index " << i << std::endl;
          tracker.addFeature(i, prev_features[j].x, prev_features[j].y);
          i++;
        }
        cvFree(&prev_features);
#endif
      }
      // Track the features
      tracker.track(cvI);
      //! [Re-init tracker]

      std::cout << "tracking of " << tracker.getNbFeatures() << " features" << std::endl;

      tracker.display(I, vpColor::red);
      vpDisplay::flush(I);
    }

    vpDisplay::getClick(I);

#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
    cvReleaseImage(&cvI);
#endif

    return 0;
  }
  catch(vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
  }
#endif
}
Example No. 3
bool CvCaptureCAM_DC1394_v2_CPP::initVidereRectifyMaps( const char* info,
    IplImage* ml[2], IplImage* mr[2] )
{
    float identity_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    CvMat l_rect = cvMat(3, 3, CV_32F, identity_data), r_rect = l_rect;
    float l_intrinsic_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    float r_intrinsic_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    CvMat l_intrinsic = cvMat(3, 3, CV_32F, l_intrinsic_data);
    CvMat r_intrinsic = cvMat(3, 3, CV_32F, r_intrinsic_data);
    float l_distortion_data[] = {0,0,0,0,0}, r_distortion_data[] = {0,0,0,0,0};
    CvMat l_distortion = cvMat(1, 5, CV_32F, l_distortion_data);
    CvMat r_distortion = cvMat(1, 5, CV_32F, r_distortion_data);
    IplImage* mx = cvCreateImage(cvGetSize(ml[0]), IPL_DEPTH_32F, 1);
    IplImage* my = cvCreateImage(cvGetSize(ml[0]), IPL_DEPTH_32F, 1);
    int k, j;

    for( k = 0; k < 2; k++ )
    {
        const char* section_name = k == 0 ? "[left_camera]" : "[right_camera]";
        static const char* param_names[] = { "f ", "fy", "Cx", "Cy", "kappa1", "kappa2", "tau1", "tau2", "kappa3", 0 };
        const char* section_start = strstr( info, section_name );
        CvMat* intrinsic = k == 0 ? &l_intrinsic : &r_intrinsic;
        CvMat* distortion = k == 0 ? &l_distortion : &r_distortion;
        CvMat* rectification = k == 0 ? &l_rect : &r_rect;
        IplImage** dst = k == 0 ? ml : mr;
        if( !section_start )
            break;
        section_start += strlen(section_name);
        for( j = 0; param_names[j] != 0; j++ )
        {
            const char* param_value_start = strstr(section_start, param_names[j]);
            float val=0;
            if(!param_value_start)
                break;
            sscanf(param_value_start + strlen(param_names[j]), "%f", &val);
            if( j < 4 )
                intrinsic->data.fl[j == 0 ? 0 : j == 1 ? 4 : j == 2 ? 2 : 5] = val;
            else
                distortion->data.fl[j - 4] = val;
        }
        if( param_names[j] != 0 )
            break;

        // some sanity check for the principal point
        if( fabs(mx->width*0.5 - intrinsic->data.fl[2]) > mx->width*0.1 ||
            fabs(my->height*0.5 - intrinsic->data.fl[5]) > my->height*0.1 )
        {
            cvScale( intrinsic, intrinsic, 0.5 ); // try the corrected intrinsic matrix for 2x lower resolution
            if( fabs(mx->width*0.5 - intrinsic->data.fl[2]) > mx->width*0.05 ||
                fabs(my->height*0.5 - intrinsic->data.fl[5]) > my->height*0.05 )
                cvScale( intrinsic, intrinsic, 2 ); // revert it back if the new variant is not much better
            intrinsic->data.fl[8] = 1;
        }

        cvInitUndistortRectifyMap( intrinsic, distortion,
                    rectification, intrinsic, mx, my );
        cvConvertMaps( mx, my, dst[0], dst[1] );
    }

    cvReleaseImage( &mx );
    cvReleaseImage( &my );
    return k >= 2;
}
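The maps written into ml[] and mr[] by cvInitUndistortRectifyMap/cvConvertMaps are ordinary OpenCV remap pairs. A minimal usage sketch, assuming pre-allocated, same-size rawLeft/rectLeft images (the names are illustrative and not part of this capture class):

    // Illustrative only: applying the left-camera maps produced above.
    // rawLeft and rectLeft are assumed to be pre-allocated images of equal size.
    cvRemap( rawLeft, rectLeft, ml[0], ml[1],
             CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0) );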
Example No. 4
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    printf("Nonzero count %d\n", cvCountNonZero(mask));

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ ) {

        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                                      cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
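update_mhi() depends on file-scope state from the original OpenCV motempl sample (N, buf, last, mhi, orient, segmask, mask, storage, plus the MHI_DURATION/MAX_TIME_DELTA/MIN_TIME_DELTA constants). A minimal driver sketch under those assumptions, closely following that sample:

// Minimal driver sketch; assumes the globals listed above are declared at file scope.
int main( int argc, char** argv )
{
    CvCapture* capture = argc > 1 ? cvCreateFileCapture( argv[1] )
                                  : cvCreateCameraCapture( 0 );
    IplImage *frame, *motion = NULL;

    if( !capture )
        return -1;
    cvNamedWindow( "Motion", 1 );
    while( (frame = cvQueryFrame( capture )) != NULL )
    {
        if( !motion )
        {
            motion = cvCreateImage( cvSize( frame->width, frame->height ), 8, 3 );
            cvZero( motion );
            motion->origin = frame->origin;
        }
        update_mhi( frame, motion, 30 ); // diff_threshold = 30, as in the sample
        cvShowImage( "Motion", motion );
        if( cvWaitKey( 10 ) >= 0 )
            break;
    }
    cvReleaseCapture( &capture );
    cvDestroyWindow( "Motion" );
    return 0;
}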
/*
// Getting feature map for the selected subimage
//
// API
// int getFeatureMaps(const IplImage * image, const int k, featureMap **map);
// INPUT
// image             - selected subimage
// k                 - size of cells
// OUTPUT
// map               - feature map
// RESULT
// Error status
*/
int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
{
    int sizeX, sizeY;
    int p, px, stringSize;
    int height, width, numChannels;
    int i, j, kk, c, ii, jj, d;
    float  * datadx, * datady;
    
    // channel index in the loop
    int   ch; 
    // variables for gradient magnitude computation
    float magnitude, x, y, tx, ty;
    
    IplImage * dx, * dy;
    int *nearest;
    float *w, a_x, b_x;

    // kernel for computing the image gradients along the x and y axes
    float kernel[3] = {-1.f, 0.f, 1.f};
    CvMat kernel_dx = cvMat(1, 3, CV_32F, kernel);
    CvMat kernel_dy = cvMat(3, 1, CV_32F, kernel);

    // gradient magnitude values
    float * r;
    // index of the sector the gradient direction falls into:
    //     even indices  - contrast-insensitive channels
    //     odd indices   - contrast-sensitive channels
    int   * alfa;
    
    // sector boundary vectors
    float boundary_x[NUM_SECTOR + 1];
    float boundary_y[NUM_SECTOR + 1];
    float max, dotProd;
    int   maxi;

    height = image->height;
    width  = image->width ;

    numChannels = image->nChannels;

    dx    = cvCreateImage(cvSize(image->width, image->height), 
                          IPL_DEPTH_32F, 3);
    dy    = cvCreateImage(cvSize(image->width, image->height), 
                          IPL_DEPTH_32F, 3);

    sizeX = width  / k;
    sizeY = height / k;
    px    = 3 * NUM_SECTOR; // contrast-sensitive and contrast-insensitive channels
    p     = px;
    stringSize = sizeX * p;
    allocFeatureMapObject(map, sizeX, sizeY, p);

    cvFilter2D(image, dx, &kernel_dx, cvPoint(-1, 0));
    cvFilter2D(image, dy, &kernel_dy, cvPoint(0, -1));
    
    float arg_vector;
    for(i = 0; i <= NUM_SECTOR; i++)
    {
        arg_vector    = ( (float) i ) * ( (float)(PI) / (float)(NUM_SECTOR) );
        boundary_x[i] = cosf(arg_vector);
        boundary_y[i] = sinf(arg_vector);
    }/*for(i = 0; i <= NUM_SECTOR; i++) */

    r    = (float *)malloc( sizeof(float) * (width * height));
    alfa = (int   *)malloc( sizeof(int  ) * (width * height * 2));

    for(j = 1; j < height - 1; j++)
    {
        datadx = (float*)(dx->imageData + dx->widthStep * j);
        datady = (float*)(dy->imageData + dy->widthStep * j);
        for(i = 1; i < width - 1; i++)
        {
            c = 0;
            x = (datadx[i * numChannels + c]);
            y = (datady[i * numChannels + c]);

            r[j * width + i] =sqrtf(x * x + y * y);
            for(ch = 1; ch < numChannels; ch++)
            {
                tx = (datadx[i * numChannels + ch]);
                ty = (datady[i * numChannels + ch]);
                magnitude = sqrtf(tx * tx + ty * ty);
                if(magnitude > r[j * width + i])
                {
                    r[j * width + i] = magnitude;
                    c = ch;
                    x = tx;
                    y = ty;
                }
            }/*for(ch = 1; ch < numChannels; ch++)*/
            
            max  = boundary_x[0] * x + boundary_y[0] * y;
            maxi = 0;
            for (kk = 0; kk < NUM_SECTOR; kk++) 
            {
                dotProd = boundary_x[kk] * x + boundary_y[kk] * y;
                if (dotProd > max) 
                {
                    max  = dotProd;
                    maxi = kk;
                }
                else 
                {
                    if (-dotProd > max) 
                    {
                        max  = -dotProd;
                        maxi = kk + NUM_SECTOR;
                    }
                }
            }
            alfa[j * width * 2 + i * 2    ] = maxi % NUM_SECTOR;
            alfa[j * width * 2 + i * 2 + 1] = maxi;  
        }/*for(i = 1; i < width - 1; i++)*/
    }/*for(j = 1; j < height - 1; j++)*/

    // compute interpolation weights and cell offsets
    nearest = (int  *)malloc(sizeof(int  ) *  k);
    w       = (float*)malloc(sizeof(float) * (k * 2));
    
    for(i = 0; i < k / 2; i++)
    {
        nearest[i] = -1;
    }/*for(i = 0; i < k / 2; i++)*/
    for(i = k / 2; i < k; i++)
    {
        nearest[i] = 1;
    }/*for(i = k / 2; i < k; i++)*/

    for(j = 0; j < k / 2; j++)
    {
        b_x = k / 2 + j + 0.5f;
        a_x = k / 2 - j - 0.5f;
        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x)); 
        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));  
    }/*for(j = 0; j < k / 2; j++)*/
    for(j = k / 2; j < k; j++)
    {
        a_x = j - k / 2 + 0.5f;
        b_x =-j + k / 2 - 0.5f + k;
        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x)); 
        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));  
    }/*for(j = k / 2; j < k; j++)*/
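    // Note: w[j * 2] simplifies to b_x / (a_x + b_x) and w[j * 2 + 1] to
    // a_x / (a_x + b_x), i.e. the two bilinear weights for a pixel lying at
    // distance a_x from its own cell center and b_x from the neighbouring
    // cell center; each pair sums to 1.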


    // bilinear interpolation into the feature map cells
    for(i = 0; i < sizeY; i++)
    {
      for(j = 0; j < sizeX; j++)
      {
        for(ii = 0; ii < k; ii++)
        {
          for(jj = 0; jj < k; jj++)
          {
            if ((i * k + ii > 0) && 
                (i * k + ii < height - 1) && 
                (j * k + jj > 0) && 
                (j * k + jj < width  - 1))
            {
              d = (k * i + ii) * width + (j * k + jj);
              (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]] += 
                  r[d] * w[ii * 2] * w[jj * 2];
              (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += 
                  r[d] * w[ii * 2] * w[jj * 2];
              if ((i + nearest[ii] >= 0) && 
                  (i + nearest[ii] <= sizeY - 1))
              {
                (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]             ] += 
                  r[d] * w[ii * 2 + 1] * w[jj * 2 ];
                (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += 
                  r[d] * w[ii * 2 + 1] * w[jj * 2 ];
              }
              if ((j + nearest[jj] >= 0) && 
                  (j + nearest[jj] <= sizeX - 1))
              {
                (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] += 
                  r[d] * w[ii * 2] * w[jj * 2 + 1];
                (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += 
                  r[d] * w[ii * 2] * w[jj * 2 + 1];
              }
              if ((i + nearest[ii] >= 0) && 
                  (i + nearest[ii] <= sizeY - 1) && 
                  (j + nearest[jj] >= 0) && 
                  (j + nearest[jj] <= sizeX - 1))
              {
                (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] += 
                  r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
                (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += 
                  r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
              }
            }
          }/*for(jj = 0; jj < k; jj++)*/
        }/*for(ii = 0; ii < k; ii++)*/
      }/*for(j = 0; j < sizeX; j++)*/
    }/*for(i = 0; i < sizeY; i++)*/
    
    cvReleaseImage(&dx);
    cvReleaseImage(&dy);


    free(w);
    free(nearest);
    
    free(r);
    free(alfa);

    return LATENT_SVM_OK;
}
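A caller sketch for getFeatureMaps(). The cell size k = 8 is the usual HOG choice; freeFeatureMapObject is the matching LatentSVM deallocator, assumed to be declared alongside this routine, and the file name is illustrative:

    // Hypothetical usage sketch (file name and cell size are illustrative).
    IplImage *img = cvLoadImage( "frame.png", CV_LOAD_IMAGE_COLOR );
    if( img != NULL )
    {
        CvLSVMFeatureMap *map = NULL;
        if( getFeatureMaps( img, 8, &map ) == LATENT_SVM_OK )
        {
            printf( "feature map: %d x %d cells, %d features per cell\n",
                    map->sizeX, map->sizeY, map->numFeatures );
            freeFeatureMapObject( &map );
        }
        cvReleaseImage( &img );
    }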
Example No. 6
// Runs the dot detector and sends detected dots to the server on the given port.
// TODO: Implement headless mode. Needs more config options and/or possibly a config file first though.
int run( const char *serverAddress, const int serverPort, char headless ) {
    char calibrate_exposure = 0, show = ~0, flip = 0, vflip = 0, done = 0, warp = 0; //"Boolean" values used in this loop
    char noiceReduction = 2; //Small counter, so char is still ok.
    int i, sockfd; //Generic counter and socket file descriptor
    int dp = 0, minDist = 29, param1 = 0, param2 = 5; // Configuration variables for circle detection 
    int minDotRadius = 1;
    int detected_dots; //Detected dot counter
    int returnValue = EXIT_SUCCESS;
    int captureControl; //File descriptor for low-level camera controls
    int currentExposure = 150;
    int maxExposure = 1250; //Maximum exposure supported by the camera TODO Get this from the actual camera
    Color min = { 0, 70, 0, 0 }; //Minimum color to detect
    Color max = { 255, 255, 255, 0 }; //Maximum color to detect
    CvScalar colorWhite = cvScalar( WHITE ); //Color to draw detected dots on black and white surface
    BoundingBox DD_mask; //The box indicating what should and what should not be considered for dot search
    BoundingBox DD_transform; //The box indicating the plane we are looking at( and as such is the plane we would transform from )
    BoundingBox DD_transform_to; //The plane we are transforming to
    CvCapture *capture = NULL; //The camera
    CvMemStorage *storage; //Low level memory area used for dynamic structures in OpenCV
    CvSeq *seq; //Sequence to store detected dots in
    IplImage *grabbedImage = NULL; //Raw image from camera( plus some overlay in the end )
    IplImage *imgThreshold = NULL; //Image with detected dots
    IplImage *mask = NULL; //Mask to be able to remove uninteresting areas
    IplImage *coloredMask = NULL; //Mask to be able to indicate above mask on output image
    CvFont font; //Font for drawing text on images
    SendQueue *queue; //Head of the linked list that is the send queue
    char strbuf[255]; //Generic buffer for text formatting( with sprintf())
    struct timeval oldTime, time, diff; //Structs for measuring FPS
    float lastKnownFPS = 0; //Calculated FPS
    CvMat* pointRealMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* pointTransMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* transMat = cvCreateMat( 3,3,CV_32FC1 ); //Translation matrix for transforming input to a straight rectangle
    ClickParams clickParams = { TOP_LEFT, NULL, &DD_transform_to, transMat }; //Struct holding data needed by mouse-click callback function

    // Set up network
    sockfd = initNetwork( serverAddress, serverPort );
    if( sockfd == -1 ) {
        fprintf( stderr, "ERROR: initNetwork returned -1\n");
        return EXIT_FAILURE;
    }
    queue = initSendQueue();

    if( openCamera( &capture, &captureControl ) == 0 ) {
        fprintf( stderr, "ERROR: capture is NULL \n" );
        return EXIT_FAILURE;
    }

    if( ( disableAutoExposure( captureControl ) ) == -1 ) {
        fprintf( stderr, "ERROR: Cannot disable auto exposure \n" );
        //return EXIT_FAILURE;
    }

    if( ( updateAbsoluteExposure( captureControl, currentExposure ) ) == 0 ) {
        fprintf( stderr, "ERROR: Cannot set exposure\n");
    }

    // Create a window in which the captured images will be presented
    cvNamedWindow( imagewindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the configuration sliders and the detection frame TODO This is kind of a hack. Make a better solution
    cvNamedWindow( configwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the transformed image. Handy to see how the dots are translated, but not needed for functionality
    if( warp ) cvNamedWindow( warpwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create sliders to adjust the lower color boundary
    cvCreateTrackbar( red_lable  , configwindowname, &min.red,   255, NULL );
    cvCreateTrackbar( green_lable, configwindowname, &min.green, 255, NULL );
    cvCreateTrackbar( blue_lable , configwindowname, &min.blue,  255, NULL );

    //Create sliders for the contour-based dot detection
    cvCreateTrackbar( min_area_lable, configwindowname, &minDotRadius,255, NULL );

    /* Slider for manual exposure setting */
    cvCreateTrackbar( exposure_lable, configwindowname, &currentExposure, maxExposure, NULL );

    //Create the memory storage
    storage = cvCreateMemStorage( 0 );

    // void cvInitFont( font, font_face, hscale, vscale, shear=0, thickness=1, line_type=8 )
    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1, 8 );

    // Grab an initial image to be able to fetch image size before the main loop.
    grabbedImage = cvQueryFrame( capture );

    //Move the two windows so both are visible at the same time
    cvMoveWindow( imagewindowname, 0, 10 );
    cvMoveWindow( configwindowname, grabbedImage->width+2, 10 );

    //TODO Move these three inits to a function
    // Set masking defaults TODO load from file? Specify file for this file loading?
    DD_mask.topLeft.x = 0;  
    DD_mask.topLeft.y = 0;

    DD_mask.topRight.x = grabbedImage->width-1;
    DD_mask.topRight.y = 0;

    DD_mask.bottomLeft.x = 0;
    DD_mask.bottomLeft.y = grabbedImage->height-1;

    DD_mask.bottomRight.x = grabbedImage->width-1;
    DD_mask.bottomRight.y = grabbedImage->height-1;

    // Set transformation defaults TODO load from file? Specify file for this file loading?
    DD_transform.topLeft.x = 0;  
    DD_transform.topLeft.y = 0;

    DD_transform.topRight.x = grabbedImage->width-1;
    DD_transform.topRight.y = 0;

    DD_transform.bottomLeft.x = 0;
    DD_transform.bottomLeft.y = grabbedImage->height-1;

    DD_transform.bottomRight.x = grabbedImage->width-1;
    DD_transform.bottomRight.y = grabbedImage->height-1;

    // Set the transformation destination
    DD_transform_to.topLeft.x = 0;  
    DD_transform_to.topLeft.y = 0;

    DD_transform_to.topRight.x = grabbedImage->width-1;
    DD_transform_to.topRight.y = 0;

    DD_transform_to.bottomLeft.x = 0;
    DD_transform_to.bottomLeft.y = grabbedImage->height-1;

    DD_transform_to.bottomRight.x = grabbedImage->width-1;
    DD_transform_to.bottomRight.y = grabbedImage->height-1;

    calculateTransformationMatrix( &DD_transform, &DD_transform_to, transMat );

    // Set callback function for mouse clicks
    cvSetMouseCallback( imagewindowname, calibrateClick, ( void* ) &clickParams );

    gettimeofday( &oldTime, NULL );

    // Main loop. Grabs an image from the camera, detects dots, sends them, and paints the dots on the images shown to the user
    while( !done ) {

        //PROFILING_PRO_STAMP(); //Uncomment this and the one in the end of the while-loop, and comment all other PROFILING_* to profile main-loop

        // ------ Common actions
        cvClearMemStorage( storage );
        detected_dots = 0;

        //Grab a frame from the camera
        PROFILING_PRO_STAMP();
        grabbedImage = cvQueryFrame( capture );
        PROFILING_POST_STAMP( "cvQueryFrame");

        if( grabbedImage == NULL ) {
            fprintf( stderr, "ERROR: frame is null...\n" );
            getchar();
            returnValue = EXIT_FAILURE;
            break;
        }

        //Flip images to act as a mirror. 
        if( show && flip ) {
            cvFlip( grabbedImage, grabbedImage, 1 );
        }
        if( show && vflip ) {
            cvFlip( grabbedImage, grabbedImage, 0 );
        }

        // ------ State based actions
        switch( state ) {
            case GRAB_DOTS:

                //Create detection image
                imgThreshold = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvInRangeS( grabbedImage, cvScalar( DD_COLOR( min )), cvScalar( DD_COLOR( max )), imgThreshold );

                //Mask away anything not in our calibration area
                mask = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvZero( mask );
                cvFillConvexPoly( mask, ( CvPoint* ) &DD_mask, 4, cvScalar( WHITE ), 1, 0 );
                cvAnd( imgThreshold, mask, imgThreshold, NULL );

                // Invert mask, increase the number of channels in it and overlay on grabbedImage //TODO Tint the mask red before overlaying
                cvNot( mask, mask );
                coloredMask = cvCreateImage( cvGetSize( grabbedImage ), grabbedImage->depth, grabbedImage->nChannels );
                cvCvtColor( mask, coloredMask, CV_GRAY2BGR );
                cvAddWeighted( grabbedImage, 0.95, coloredMask, 0.05, 0.0, grabbedImage );


                // Reduce noise. 
                // Erode is kind of floor() of pixels, dilate is kind of ceil()
                // I'm not sure which gives the best result.
                switch( noiceReduction ) {
                    case 0: break; //No noise reduction at all
                    case 1: cvErode( imgThreshold, imgThreshold, NULL, 2 ); break;
                    case 2: cvDilate( imgThreshold, imgThreshold, NULL, 2 ); break;
                }

                // Warp the warp-image. We are reusing the coloredMask variable to save some space
                PROFILING_PRO_STAMP();
                if( show && warp ) cvWarpPerspective( grabbedImage, coloredMask, transMat, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ));
                PROFILING_POST_STAMP( "Warping perspective" );


                // Find all dots in the image
                PROFILING_PRO_STAMP();

                // Clear old data from seq
                seq = 0;

                // Find the dots
                cvFindContours(
                        imgThreshold,
                        storage,
                        &seq,
                        sizeof( CvContour ),
                        CV_RETR_LIST,
                        CV_CHAIN_APPROX_SIMPLE,
                        cvPoint( 0,0 )
                        );
                // cvFindContours destroys the original image, so we wipe it here
                // and then repaint the detected dots later
                cvZero( imgThreshold );

                PROFILING_POST_STAMP( "Dot detection" );

                //Process all detected dots
                PROFILING_PRO_STAMP();
                for( ; seq != 0; seq = seq->h_next ) {

                    // Calculate radius of the detected contour
                    CvRect rect =( ( CvContour * )seq )->rect;
                    float relCenterX = rect.width / 2;
                    float relCenterY = rect.height / 2;

                    // Make sure the dot is big enough
                    if( relCenterX < minDotRadius || relCenterY < minDotRadius ) {
                        continue;
                    }

                    // Note that we have found another dot
                    ++detected_dots;

                    // Transform the detected dot according to transformation matrix.
                    float absCenter[] = { rect.x + relCenterX, rect.y + relCenterY };
                    pointRealMat->data.fl = absCenter; // point the 1x1 matrix at the stack-allocated coordinates
                    cvPerspectiveTransform( pointRealMat, pointTransMat, transMat );

                    // Draw the detected contour back to imgThreshold
                    // Draw the detected dot both to real image and to warped( if warp is active )
                    if( show ) {
                        cvDrawContours( imgThreshold, seq, colorWhite, colorWhite, -1, CV_FILLED, 8, cvPoint( 0,0 ) );
                        drawCircle( absCenter[0], absCenter[1], ( relCenterX + relCenterY ) / 2, grabbedImage );
                        if( warp ) {
                            drawCircle( pointTransMat->data.fl[0], pointTransMat->data.fl[1], ( relCenterX + relCenterY ) / 2, coloredMask );
                        }
                    }

                    // Add detected dot to to send queue
                    addPointToSendQueue( pointTransMat->data.fl, queue ); 
                }

                PROFILING_POST_STAMP("Painting dots");

                //Calculate framerate
                gettimeofday( &time, NULL );
                timeval_subtract( &diff, &time, &oldTime );
                lastKnownFPS = lastKnownFPS * 0.7 + ( 1000000.0 / diff.tv_usec ) * 0.3; //We naïvely assume we have more than 1 FPS
                oldTime = time;

                //Send the dots detected this frame to the server
                PROFILING_PRO_STAMP();
                sendQueue( sockfd, queue );
                clearSendQueue( queue );
                PROFILING_POST_STAMP( "Sending dots" );

                /* If calibrating, do the calibration */
                if( calibrate_exposure ) {
                    int ret;
                    ret = calibrateExposureLow( captureControl, detected_dots, &currentExposure, DD_MAX_EXPOSURE, lastKnownFPS );
                    switch( ret ) {
                        case 0: // We are done. Let's leave calibration mode
                            calibrate_exposure = 0;
                            printf( "done\n" );
                            break;

                        case -1: // We hit the upper limit with no detected dots
                            fprintf( stderr, "Reached upper limit (%d). Aborting!\n", DD_MAX_EXPOSURE );
                            calibrate_exposure = 0;
                            break;

                        case -2: // We hit the lower limit with more than one dot detected
                            fprintf( stderr, "Too bright. More than one dot found even with minimal exposure. Aborting!\n");
                            calibrate_exposure = 0;
                            break;

                        case -3: //No conclusive results.
                            fprintf( stderr, "No conclusive results. Giving up\n" );
                            calibrate_exposure = 0;
                            break;
                    }
                }

                break; //End of GRAB_DOTS

            case SELECT_TRANSFORM:
                //Falling through here. Poor man's multi-case clause. Not putting this in default as we might
                //want to do different things in these two some day.
            case SELECT_MASK:
                snprintf( strbuf, sizeof( strbuf ), "Select %s point", pointTranslationTable[clickParams.currentPoint]);
                cvDisplayOverlay( imagewindowname, strbuf, 5 );
                break; //End of SELECT_MASK and SELECT_TRANSFORM
        }

        // Paint the corners of the detecting area and the calibration area
        paintOverlayPoints( grabbedImage, &DD_transform );

        //Print some statistics to the image
        if( show ) {
            snprintf( strbuf, sizeof( strbuf ), "Dots: %i", detected_dots ); //Print number of detected dots to the screen
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 20 ), &font, cvScalar( WHITE ));
            snprintf( strbuf, sizeof( strbuf ), "FPS: %.1f", lastKnownFPS );
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 40 ), &font, cvScalar( WHITE ));
            cvCircle( grabbedImage, cvPoint( 15, 55 ), minDotRadius, cvScalar( min.blue, min.green, min.red, min.alpha ), -1, 8, 0 ); // Colors given in order BGR-A, Blue, Green, Red, Alpha
        }

        //Show images 
        PROFILING_PRO_STAMP();
        if( show ) {
            cvShowImage( configwindowname, imgThreshold );
            cvShowImage( imagewindowname, grabbedImage );
            if( warp ) cvShowImage( warpwindowname, coloredMask );
        }
        PROFILING_POST_STAMP("Showing images");

        //Release the temporary images
        cvReleaseImage( &imgThreshold );
        cvReleaseImage( &mask );
        cvReleaseImage( &coloredMask );

        /* Update exposure if needed */
        updateAbsoluteExposure( captureControl, currentExposure );
        cvSetTrackbarPos( exposure_lable, configwindowname, currentExposure );

        //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7( linux version ),
        //remove higher bits using AND operator
        i = ( cvWaitKey( 10 ) & 0xff );
        switch( i ) {
            case 'g': 
                makeCalibrate( &DD_transform, &DD_transform_to, transMat, capture, captureControl, 20 );
                updateAbsoluteExposure( captureControl, currentExposure+1 );
                break;

            case 'e': 
                toggleCalibrationMode( &calibrate_exposure, &currentExposure );
                break; /* Toggles calibration mode */

            case 'c':
                openCamera( &capture, &captureControl );
                break;

            case 's': 
                show = ~show;
                break; //Toggles updating of the image. Can be useful for performance of slower machines... Or as frame freeze

            case 'm': 
                state = SELECT_MASK;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_mask;
                break; //Starts selection of masking area. Will return to dot detection once all four points are set

            case 't':
                state = SELECT_TRANSFORM;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_transform;
                break; //Starts selection of the transformation area. Returns to dot detection when done.

            case 'f':
                flip = ~flip;
                break; //Toggles horizontal flipping of the image
            case 'v':
                vflip = ~vflip;
                break; //Toggles vertical flipping of the image

            case 'w':
                warp = ~warp;
                toggleWarpOutput( warp );
                break; //Toggles showing the warped image

            case 'n':
                noiceReduction = ( noiceReduction + 1 ) % 3;
                break; //Cycles the noise reduction algorithm

            case 'q': //falling through here to quit

            case  27: 
                done = 1;
                break; //ESC. Kills the whole thing( in a nice and controlled manner )
        }
        fflush( stdout ); //Make sure everything in the buffer is printed before we go on

        //PROFILING_POST_STAMP("Main loop");
    } //End of main while-loop

    // Release the capture device and do some housekeeping.
    // Note: grabbedImage comes from cvQueryFrame and is owned by the capture, so it must not be released manually.
    cvReleaseCapture( &capture );
    cvReleaseMemStorage( &storage );
    cvDestroyWindow( imagewindowname );
    cvDestroyWindow( configwindowname );
    if( warp ) cvDestroyWindow( warpwindowname ); //If warp is off, the window was never created
    destroySendQueue( queue );
    close( sockfd );
    close( captureControl );
    return returnValue;
}
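run() relies on several helpers that are not shown here (initNetwork, openCamera, calibrateExposureLow, timeval_subtract, ...). For reference, a sketch of the assumed timeval_subtract, modeled on the classic GNU libc example that the FPS code above presumes:

/* Sketch of the assumed timeval_subtract helper: result = x - y.
 * Returns 1 if the difference is negative. Modeled on the GNU libc example. */
int timeval_subtract( struct timeval *result, struct timeval *x, struct timeval *y ) {
    struct timeval yy = *y; // work on a copy so y is left untouched
    if( x->tv_usec < yy.tv_usec ) {
        int nsec = ( yy.tv_usec - x->tv_usec ) / 1000000 + 1;
        yy.tv_usec -= 1000000 * nsec;
        yy.tv_sec += nsec;
    }
    if( x->tv_usec - yy.tv_usec > 1000000 ) {
        int nsec = ( x->tv_usec - yy.tv_usec ) / 1000000;
        yy.tv_usec += 1000000 * nsec;
        yy.tv_sec -= nsec;
    }
    result->tv_sec = x->tv_sec - yy.tv_sec;
    result->tv_usec = x->tv_usec - yy.tv_usec;
    return x->tv_sec < yy.tv_sec;
}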
int main(int argc, char **argv) {
	int i,index=0;
	int width_img=0;	// Frame width
	int height_img=0;	// Frame height
	double fps=0.0;		// FPS (Frames Per Second)
	int frame=0;		// Frame number (index)
	int msec;
	int total_frames=0;	// Total frames
	int marked_frames=0;	// Marked frame
	int *check_frames;			// Contains indices of marked frames
	int *list_of_frames;		// List of frames
	double *ecr;
	IplImage *previous_frame;	// Previous frame
	IplImage *current_frame;	// Current frame
	IplImage *bgr_frame;	// Frame
	IplImage *new_frame;	// Frame
	CvCapture* capture=0;	// Capture struct
	CvSize size;		// Size of frame (width x height)
	FILE *fp;		// TXT file pointer
	clock_t start, stop, diff; // Timer
	
	// Text variables
	CvScalar black = CV_RGB(255,0,0); // text color (red, despite the variable name)
	CvFont font1;
	int thickness = 2;
	char text1[20] = "0"; // frame number
	char text2[20] = "0"; // frame msec position
	double hscale = 1.0;
	double vscale = 1.0;
	double shear = 0.0;

	// Check if the user gave arguments
	if(argc != 4) {
		fprintf(stderr, "\nUSAGE: %s <input_video_file> <output_video_file> <output_TXT_file>\n", argv[0]);
		return EXIT_FAILURE;
	}

	
	
	/**** STAGE 1: PROCESS FRAMES ****/
	
	capture = cvCreateFileCapture(argv[1]);	// Open video file to start capture
	if(!capture) {
		printf("Error opening video file! (cvCreateFileCapture)\n");
		return EXIT_FAILURE;
	}

	fp=fopen(argv[3],"w");		// Open file to write stats
	if(fp == NULL) {
		printf("Error opening file! (fopen)\n");
		return EXIT_FAILURE;
	}

	fps = cvGetCaptureProperty(capture,CV_CAP_PROP_FPS);				// Get FPS
	width_img = cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH);		// Get frame width
	height_img = cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT);		// Get frame height
	total_frames = cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_COUNT);		// Get total frames
	size = cvSize(width_img,height_img);						// Get size of frames

	check_frames = (int *)malloc(sizeof(*check_frames) * total_frames);
	list_of_frames = (int *)malloc(sizeof(*list_of_frames) * total_frames);
	ecr = (double *)malloc(sizeof(*ecr) * total_frames);
	if (check_frames == NULL || list_of_frames == NULL || ecr == NULL) {
		printf("Error allocating memory!\n");
		return EXIT_FAILURE;
	}

	// Initialize arrays
	for(i=0;i<total_frames;i++) {
		ecr[i]=0.0;
		check_frames[i]=0;
		list_of_frames[i]=0;
	}
	
	cvInitFont(&font1,CV_FONT_HERSHEY_SIMPLEX,hscale,vscale,shear,thickness,CV_AA);
	
	CvPoint pt1 = cvPoint(5,30);
	CvPoint pt2 = cvPoint(5,70);
	
	fprintf(fp,"Filename\t:\t%s\n\nFrame width\t:\t%d\nFrame height\t:\t%d\nFPS\t\t:\t%f\nTotal frames\t:\t%d\n\n\n\n",argv[1],width_img,height_img,fps,total_frames);
	printf("Filename\t:\t%s\n\nFrame width\t:\t%d\nFrame height\t:\t%d\nFPS\t\t:\t%f\nTotal frames\t:\t%d\n\n\n\n",argv[1],width_img,height_img,fps,total_frames);
	
	printf("Start processing frames...\n\n");
	
	start = clock(); // Start timer
	
	bgr_frame=cvQueryFrame(capture);												// Grab first frame
	previous_frame = cvCreateImage(size, bgr_frame->depth, bgr_frame->nChannels);	// Create the previous frame
	current_frame = cvCreateImage(size, bgr_frame->depth, bgr_frame->nChannels);	// Create the current frame
	cvCopy(bgr_frame,previous_frame,NULL);											// Save the copy
	
	// Grab frames from the video until NULL
	while((bgr_frame=cvQueryFrame(capture)) != NULL) {
		/* When entering this loop, we have already grabbed a frame
		 * so the frame counter starts from 2
		 */
		frame = cvGetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES);					// Get the current frame number
		
		cvCopy(bgr_frame,current_frame,NULL);											// Save the copy
		
		/**** START PROCESSING ****/
		ecrdiff_v2(current_frame, previous_frame, size, frame, fp, &index);
		/**** END PROCESSING ****/

		cvCopy(bgr_frame,previous_frame,NULL);	// Save the copy
		
		if(index==1) {
			check_frames[frame]=1;	// It means that the specific frame is marked
		}
		
		printf("Processing frame %d...\r",frame);
		fflush(stdout);
	}
	
	// bgr_frame comes from cvQueryFrame and is owned by the capture; it must not be released manually
	cvReleaseImage(&previous_frame);	// Release previous_frame
	cvReleaseImage(&current_frame);		// Release current_frame
	cvReleaseCapture(&capture);			// Release capture
	
	stop = clock();			// Stop timer
	diff = stop - start;	// Get difference between start time and current time;
	printf("\n\nTotal time processing frames : %f minutes\t%f seconds\n", (((float)diff)/CLOCKS_PER_SEC)/60, ((float)diff)/CLOCKS_PER_SEC);
	printf("Processing completed!\n");
	
	fprintf(fp,"\n\n\n\nMarked frames\n\n");
	printf("\n\n\n\nMarked frames\n\n");

	for(i=0;i<total_frames;i++)	{
		if(check_frames[i]==1) {
			list_of_frames[i]=i;
			fprintf(fp,"frame %d\n",i);	// Write to file only marked frames
			printf("frame %d\n",i);	// Write to file only marked frames
			marked_frames++;
		}
	}

	fprintf(fp,"\n\nTotal marked frames\t:\t%d\n",marked_frames);
	printf("\n\nTotal marked frames\t:\t%d\n\n",marked_frames);

	// If there are no marked frames, exit
	if(marked_frames == 0) {
		return EXIT_SUCCESS;
	}
	
	
	
	/**** STAGE 2: WRITE VIDEO ****/
	
	capture = cvCreateFileCapture(argv[1]);	// Re-Open video file to start capture
	if(!capture) {
		printf("Error opening video file! (cvCreateFileCapture)\n");
		return EXIT_FAILURE;
	}
	
	CvVideoWriter *writer = cvCreateVideoWriter(argv[2],CV_FOURCC('F','M','P','4'),fps,size,1);
	
	printf("Start writing frames...\n\n");
	
	start = clock(); // Start timer

	bgr_frame = cvQueryFrame(capture);	// Retrieve frame
	new_frame = cvCreateImage(size, bgr_frame->depth, bgr_frame->nChannels);	// Create the new frame
	
	do {
		frame = cvGetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES);	// Get the current frame number
		msec = cvGetCaptureProperty(capture,CV_CAP_PROP_POS_MSEC);
		msec = msec/1000;	// convert milliseconds to seconds
		
		// If the index number of the current frame is equal to the frame we want, then write it to the stream.
		if(frame == list_of_frames[frame]) {
			cvCopy(bgr_frame,new_frame,NULL);	// Save the copy
			
			sprintf(text1,"%d frame",frame); // int to char via sprintf()
			cvPutText(new_frame,text1,pt1,&font1,black); // frame number

			sprintf(text2,"%d sec",msec); // int to char via sprintf()
			cvPutText(new_frame,text2,pt2,&font1,black); // frame msec position
			
			cvWriteFrame(writer, new_frame);	// Write frame to video stream
		} else {
			cvWriteFrame(writer, new_frame);	// Unmarked position: re-write the previous new_frame (the last annotated frame)
		}
		
		printf("Writing frame %d...\r",frame);
		fflush(stdout); // For '/r' to work we have to flush the output stream
	} while((bgr_frame=cvQueryFrame(capture)) != NULL);
		
	stop = clock(); 		// Stop timer
	diff = stop - start;	// Get difference between start time and current time;
	printf("\n\nTotal time writing frames : %f minutes\t%f seconds\n", (((float)diff)/CLOCKS_PER_SEC)/60, ((float)diff)/CLOCKS_PER_SEC);
	printf("Writing completed!\n\n");

	fclose(fp);					// Close file pointer
	free(list_of_frames);		// Free list_of_frames
	free(check_frames);			// Free check_frames
	free(ecr);					// Free ecr
	// bgr_frame comes from cvQueryFrame and is owned by the capture; it must not be released manually
	cvReleaseImage(&new_frame);		// Release new_frame
	cvReleaseVideoWriter(&writer);	// Release the video writer
	cvReleaseCapture(&capture);		// Release capture

	return EXIT_SUCCESS;
}
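ecrdiff_v2() itself is not shown. For orientation, a sketch of the classic edge-change-ratio (ECR) test it presumably implements, built on Canny edge maps; this is an illustration of the general technique, not the author's code:

/* Illustrative ECR sketch -- NOT the author's ecrdiff_v2.
 * Sets *marked when max(entering, exiting) edge ratio exceeds threshold. */
static void ecr_sketch(IplImage *curr_gray, IplImage *prev_gray, CvSize size,
                       double threshold, int *marked)
{
	IplImage *e_curr = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage *e_prev = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage *d_curr = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage *d_prev = cvCreateImage(size, IPL_DEPTH_8U, 1);

	cvCanny(curr_gray, e_curr, 100, 200, 3);
	cvCanny(prev_gray, e_prev, 100, 200, 3);
	cvDilate(e_curr, d_curr, NULL, 2);	/* tolerate small edge displacements */
	cvDilate(e_prev, d_prev, NULL, 2);

	int n_curr = cvCountNonZero(e_curr), n_prev = cvCountNonZero(e_prev);

	/* Entering edges: in curr but not near any prev edge; exiting: vice versa. */
	cvNot(d_prev, d_prev);
	cvAnd(e_curr, d_prev, d_prev, NULL);
	cvNot(d_curr, d_curr);
	cvAnd(e_prev, d_curr, d_curr, NULL);

	double in_ratio  = n_curr ? (double)cvCountNonZero(d_prev) / n_curr : 0.0;
	double out_ratio = n_prev ? (double)cvCountNonZero(d_curr) / n_prev : 0.0;
	*marked = ((in_ratio > out_ratio) ? in_ratio : out_ratio) > threshold;

	cvReleaseImage(&e_curr); cvReleaseImage(&e_prev);
	cvReleaseImage(&d_curr); cvReleaseImage(&d_prev);
}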
Example No. 8
////////////////////////////////////////////////////////////////////////////////////
// Display the tensor information at each scale as a color image
////////////////////////////////////////////////////////////////////////////////////
void Tensor::ShowTensorByColorImage()
{
	double ret_minr=0.0;
	double ret_maxr=0.0;
	double ret_ming=0.0;
	double ret_maxg=0.0;
	double ret_minb=0.0;
	double ret_maxb=0.0;
	int x,y,i;
	// texture feature images
	IplImage **pImg= new IplImage *[m_levels];
	for (i = 0;i < m_levels;i++)
	{
		pImg[i] = cvCreateImage( cvGetSize(m_img), m_img->depth, 3);
		cvZero(pImg[i]);
	}

	CString * ptitle=new CString [m_levels];

	for (i=0;i<m_levels;i++)
	{
		// find the upper and lower bounds of each color channel in this image
		for (y=0; y<m_h;y++)
		{
			for (x=0;x<m_w;x++)
			{
				if((*m_pImageTensorRGB[i])(x,y).r>ret_maxr)
				{
					ret_maxr=(*m_pImageTensorRGB[i])(x,y).r;
				}
				if ((*m_pImageTensorRGB[i])(x,y).r<ret_minr)
				{
					ret_minr=(*m_pImageTensorRGB[i])(x,y).r;
				}

				if((*m_pImageTensorRGB[i])(x,y).g>ret_maxg)
				{
					ret_maxg=(*m_pImageTensorRGB[i])(x,y).g;
				}
				if ((*m_pImageTensorRGB[i])(x,y).g<ret_ming)
				{
					ret_ming=(*m_pImageTensorRGB[i])(x,y).g;
				}

				if((*m_pImageTensorRGB[i])(x,y).b>ret_maxb)
				{
					ret_maxb=(*m_pImageTensorRGB[i])(x,y).b;
				}
				if ((*m_pImageTensorRGB[i])(x,y).b<ret_minb)
				{
					ret_minb=(*m_pImageTensorRGB[i])(x,y).b;
				}

			}
		}
		uchar * dst=(uchar *)pImg[i]->imageData;
		for (y=0; y<m_h;y++)
		{
			for (x=0;x<m_w;x++)
			{
				int temp=y*(pImg[i]->widthStep)+3*x;
				// normalize to [0,255]; using 255 (not 256) keeps the maximum from wrapping to 0
				dst[temp+2]=(uchar)(((*m_pImageTensorRGB[i])(x,y).r-ret_minr)/(ret_maxr-ret_minr)*255);
				dst[temp+1]=(uchar)(((*m_pImageTensorRGB[i])(x,y).g-ret_ming)/(ret_maxg-ret_ming)*255);
				dst[temp+0]=(uchar)(((*m_pImageTensorRGB[i])(x,y).b-ret_minb)/(ret_maxb-ret_minb)*255);
			}
		}
		ptitle[i].Format(_T("Image Texture of Level %d"),i);
		cvNamedWindow((char *)(LPCTSTR)ptitle[i],CV_WINDOW_AUTOSIZE);
		cvShowImage((char *)(LPCTSTR)ptitle[i],pImg[i]);
	}
	if (pImg != NULL)
	{
		for (i=0;i<m_levels;i++)
		{
			cvReleaseImage(&pImg[i]);
		}
		delete [] pImg;
	}
}
int main(int argc, char* argv[])
{
	IplImage *m_pPreImage = NULL;
	IplImage *m_pGrayImage = NULL;
	IplImage *m_pSmoothImage = NULL;
	IplImage *pPrev = NULL;
	IplImage *pCurr = NULL;
	IplImage *pDest = NULL;
	IplImage *pMask = NULL;
	IplImage *pMaskDest = NULL;
	IplImage *dst = NULL;
	CvMat *pPrevF = NULL;
	CvMat *pCurrF = NULL;
	CvSize imgSize;

    CvCapture *m_pCapture = NULL;
	CvVideoWriter *writer = 0;
	IplConvKernel* element = NULL;
	CvSeq* contour = 0;
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvRect r;

	// IplConvKernel* element;

    cvNamedWindow( "VideoDisplay1", 1 );
	cvNamedWindow( "VideoDisplay2", 1 );
	cvNamedWindow( "VideoDisplay3", 1 );
	cvNamedWindow( "VideoDisplay4", 1 );
	
// Capture
	m_pCapture = cvCreateFileCapture("MVI_8833.AVI");
	contour = cvCreateSeq(CV_SEQ_ELTYPE_POINT,sizeof(CvSeq),sizeof(CvPoint),storage);
	

    if( !m_pCapture )
    {
        fprintf(stderr,"Could not initialize capturing! \n");
        return -1;
    }
// Display
    while ( (m_pPreImage = cvQueryFrame(m_pCapture)))
    {	
		imgSize = cvSize(m_pPreImage->width, m_pPreImage->height);
		if(!m_pGrayImage)
			m_pGrayImage = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		if(!pCurr)
			pCurr = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);	
		if(!m_pSmoothImage)
			m_pSmoothImage = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);

		// image preprocessing
		cvCvtColor(m_pPreImage, m_pGrayImage, CV_BGR2GRAY);	// convert to grayscale
		cvSmooth(m_pGrayImage,m_pSmoothImage,CV_GAUSSIAN,3,0,0,0 );	// Gaussian smoothing to suppress noise
		cvEqualizeHist(m_pSmoothImage,pCurr );	// histogram equalization


		 if(!pPrevF)
			pPrevF = cvCreateMat(imgSize.height, imgSize.width, CV_32FC1);	// cvCreateMat takes (rows, cols)
		 if(!pCurrF)
			pCurrF = cvCreateMat(imgSize.height, imgSize.width, CV_32FC1);
		 if(!pPrev)
			pPrev = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!pMask)
			pMask = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!pMaskDest)
			pMaskDest = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!dst)
			dst = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
		 if(!pDest)
			{
				pDest = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
				
			}
	
		cvAbsDiff(pPrev, pCurr, pDest);   // frame difference
		cvCopy(pCurr, pPrev, NULL);  // store the current frame as the previous frame

		
		cvThreshold(pDest, pMask, 80, 255, CV_THRESH_BINARY);     // binarize
		element = cvCreateStructuringElementEx( 9, 9, 3, 3, CV_SHAPE_RECT, NULL);
		cvMorphologyEx( pMask, pMaskDest, NULL, element, CV_MOP_CLOSE, 1);	// morphological close
		cvReleaseStructuringElement( &element );	// release per frame: a new element is created on every iteration
		
		// find and draw the blob contours
		cvFindContours( pMaskDest, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

		// draw the minimal bounding rectangle around each target
		for(;contour;contour=contour->h_next)
		{
			r=((CvContour*)contour)->rect;
			if(r.height*r.width>100)
			{
				cvRectangle(m_pPreImage,cvPoint(r.x,r.y),cvPoint(r.x+r.width,r.y+r.height),CV_RGB(255,0,0),1,CV_AA,0);
				
			}
		}


		cvShowImage( "VideoDisplay1", m_pPreImage );
		cvShowImage( "VideoDisplay2", pMask);
		cvShowImage( "VideoDisplay3", pMaskDest );
		cvShowImage( "VideoDisplay4", pPrev );

		if(cvWaitKey(50)>0)
			break;	// exit the loop so the cleanup below runs instead of returning directly
	}

	// Release resources (m_pPreImage is owned by the capture and must not be released manually)
	cvReleaseImage( &m_pGrayImage );
	cvReleaseImage( &m_pSmoothImage );
	cvReleaseImage( &pCurr );
	cvReleaseImage( &pDest );
	cvReleaseImage( &pMask );
	cvReleaseImage( &pMaskDest );
	cvReleaseImage( &dst );
	cvReleaseMemStorage( &storage );
    cvDestroyWindow("VideoDisplay1");
	cvDestroyWindow("VideoDisplay2");
	cvDestroyWindow("VideoDisplay3");
	cvDestroyWindow("VideoDisplay4");
	cvReleaseStructuringElement( &element ); 

	return 0;
}
Example No. 10
//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////
Tensor::Tensor(const IplImage *cv_image, BOOL isComputeGradient)
{
	// keep a copy of the original image
	m_img=cvCreateImage(cvSize(cv_image->width,cv_image->height),cv_image->depth,3);
	cvCopyImage(cv_image,m_img);

	// parameters of the nonlinear multiscale structure tensor
	m_levels = 2;

	ASSERT(m_levels > 0 );

	m_dim = m_levels * SiNGLE_TENSOR_DIM;    // SiNGLE_TENSOR_DIM: dimension of a single tensor

	// SiNGLE_TENSOR_DIM = n(n+1)/2; solving for n gives m_axes_cnt, the number of coordinate axes
	m_axes_cnt = (unsigned int)(sqrt(2 * SiNGLE_TENSOR_DIM + 0.25) - 0.5);   // 2

	m_grad_dim = m_levels * m_axes_cnt;      // gradient components: one (dx, dy) pair per level


	////////////////////////////////////////////////////////////////////////////
	// split the multi-channel image into single channels (three channels assumed)
	unsigned int x,y,i,n;
	m_w = cv_image->width;
	m_h = cv_image->height;
	IplImage *cv_channels[3];
	for (n = 0;n < 3;n++)
	{
		cv_channels[n] = cvCreateImage( cvGetSize(cv_image), cv_image->depth, 1 );
	}
	cvSplit(cv_image, cv_channels[0], cv_channels[1], cv_channels[2], NULL);


	////////////////////////////////////////////////////////////////////////////
	// initialize m_tensor; CMatrix(m_h,m_w) creates a matrix with all elements set to zero
	m_tensor = new CMatrix *[m_dim];
	for (i=0;i<m_dim;i++)
	{   
		m_tensor[i] = new CMatrix(m_h,m_w);
	}

	////////////////////////////////////////////////////////////////////////////
	// allocate storage for rendering each scale's tensor as a color image
	m_pImageTensorRGB=new Image<Color_RGB> *[m_levels];
	for (i=0;i<m_levels;i++)
	{   
		m_pImageTensorRGB[i] = new Image<Color_RGB> (m_w,m_h);
	}

	// initialize m_gradient
	if (isComputeGradient)
	{
		m_gradient = new CMatrix *[m_grad_dim];
		for (i=0;i<m_grad_dim;i++)
		{
			m_gradient[i] = new CMatrix(m_h,m_w);
		}
	}
	else
	{
		m_gradient = NULL;
	}


	// helper matrices
	CMatrix image(m_h, m_w);
	CMatrix dx(m_h,m_w);
	CMatrix dy(m_h,m_w);
	CMatrix dx2(m_h,m_w);
	CMatrix dy2(m_h,m_w);
	CMatrix dxdy(m_h,m_w);

	// wrap existing data buffers as OpenCV matrices
	CvMat cv_dx2 = cvMat(m_h, m_w, CV_64FC1, dx2.GetData());
	CvMat cv_dy2 = cvMat(m_h, m_w, CV_64FC1, dy2.GetData());
	CvMat cv_dxdy =cvMat(m_h, m_w, CV_64FC1, dxdy.GetData());


	// convert IplImage to CMatrix, processing each color channel separately
	for (n = 0;n <3;n++)	// n is the channel index; three channels assumed
	{  
		// copy this channel's elements into image
		for (y = 0; y < m_h; y++)
		{
			for (x = 0; x < m_w; x++)
			{
				uchar* dst = &CV_IMAGE_ELEM( cv_channels[n], uchar, y, x );
				image.SetElement(y, x, (double)(dst[0]));
			}
		}
		// compute this channel's gradients (x and y directions) into dx and dy
		image.centdiffX(dx);
		image.centdiffY(dy);

		// wrap dx and dy as cv_dx and cv_dy
		CvMat cv_dx = cvMat(m_h, m_w, CV_64FC1, dx.GetData());
		CvMat cv_dy = cvMat(m_h, m_w, CV_64FC1, dy.GetData());

		// wrap m_tensor[0], m_tensor[1], m_tensor[2] (all still zero) as cv_tensor0, cv_tensor1, cv_tensor2
		CvMat cv_tensor0 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[0])->GetData());
		CvMat cv_tensor1 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[1])->GetData());
		CvMat cv_tensor2 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[2])->GetData());

		// compute the image gradient into cv_gradX, cv_gradY and accumulate it into m_gradient[0], m_gradient[1]
		if (isComputeGradient)
		{   
			// wrap cv_gradX, cv_gradY and accumulate
			CvMat cv_gradX = cvMat(m_h, m_w, CV_64FC1, (m_gradient[0])->GetData());
			CvMat cv_gradY = cvMat(m_h, m_w, CV_64FC1, (m_gradient[1])->GetData());
			cvAdd(&cv_gradX, &cv_dx, &cv_gradX);	// accumulate over the three channels
			cvAdd(&cv_gradY, &cv_dy, &cv_gradY);
		}

		// structure tensor: cv_tensor0 += dx*dx, cv_tensor1 += dy*dy, cv_tensor2 += dx*dy
		cvMul(&cv_dx, &cv_dx, &cv_dx2);
		cvAdd(&cv_tensor0, &cv_dx2, &cv_tensor0);
		cvMul(&cv_dy, &cv_dy, &cv_dy2);
		cvAdd(&cv_tensor1, &cv_dy2, &cv_tensor1);
		cvMul(&cv_dx, &cv_dy, &cv_dxdy);
		cvAdd(&cv_tensor2, &cv_dxdy, &cv_tensor2);


		// single-scale computation done; what follows is the multiscale nonlinear structure tensor
		if (m_levels > 1)
		{   
			unsigned int wavelet_levels = m_levels - 1;	// -1 because there is no earlier if (m_levels == 1) early-out
			double dMaxValue,dMinValue;
			cvMinMaxLoc(cv_channels[n], &dMinValue, &dMaxValue);//Finds global minimum, maximum 

			// normalize the pixel values to [0,1]
			Wavelet *wave = new Wavelet(&image, dMinValue, dMaxValue, wavelet_levels); // invoke the Wavelet constructor

			// allocate the array of WaveletDetailImages structs
			WaveletDetailImages *D_images = new WaveletDetailImages[wavelet_levels];

			for (i = 0; i < wavelet_levels; i++)
			{
				D_images[i].Detail_1 = new CMatrix(m_h, m_w);
				D_images[i].Detail_2 = new CMatrix(m_h, m_w);
			}

			wave->execute(D_images);	// obtain D(s,x), D(s,y)

			for (i = 0; i < wavelet_levels; i++)
			{   
				// default scale factor of the multiscale structure tensor: a = 2
				double scale = pow((float)0.25, (int)(i + 1));              // see formula (2-15)
				CvMat cv_dx = cvMat(m_h, m_w, CV_64FC1, D_images[i].Detail_1->GetData());
				CvMat cv_dy = cvMat(m_h, m_w, CV_64FC1, D_images[i].Detail_2->GetData());
				CvMat cv_tensor0 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[(i+1) * SiNGLE_TENSOR_DIM])->GetData());
				CvMat cv_tensor1 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[(i+1) * SiNGLE_TENSOR_DIM + 1])->GetData());
				CvMat cv_tensor2 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[(i+1) * SiNGLE_TENSOR_DIM + 2])->GetData());
				// accumulate the gradients
				if (isComputeGradient)
				{
					CvMat cv_gradX = cvMat(m_h, m_w, CV_64FC1, (m_gradient[(i+1) * m_axes_cnt])->GetData());
					CvMat cv_gradY = cvMat(m_h, m_w, CV_64FC1, (m_gradient[(i+1) * m_axes_cnt + 1])->GetData());
					cvAdd(&cv_gradX, &cv_dx, &cv_gradX);
					cvAdd(&cv_gradY, &cv_dy, &cv_gradY);
				}
				// accumulate the tensor
				cvMul(&cv_dx, &cv_dx, &cv_dx2, scale);
				cvAdd(&cv_tensor0, &cv_dx2, &cv_tensor0);
				cvMul(&cv_dy, &cv_dy, &cv_dy2, scale);
				cvAdd(&cv_tensor1, &cv_dy2, &cv_tensor1);
				cvMul(&cv_dx, &cv_dy, &cv_dxdy, scale);
				cvAdd(&cv_tensor2, &cv_dxdy, &cv_tensor2);
			}
			for (i = 0; i < wavelet_levels; i++)
			{
				delete D_images[i].Detail_1;
				delete D_images[i].Detail_2;
			}
			delete [] D_images;
			delete wave;
		}
		cvReleaseImage(&cv_channels[n]);
	}

	//store each scale's structure tensor as an RGB image
	for (i = 0; i < m_levels; i++)
	{
		for (y = 0; y < m_h; y++)
		{
			for (x = 0; x < m_w; x++)
			{
				(*m_pImageTensorRGB[i])(x,y).r = (m_tensor[i*SiNGLE_TENSOR_DIM])->GetElement(y,x);
				(*m_pImageTensorRGB[i])(x,y).g = (m_tensor[i*SiNGLE_TENSOR_DIM+1])->GetElement(y,x);
				(*m_pImageTensorRGB[i])(x,y).b = (m_tensor[i*SiNGLE_TENSOR_DIM+2])->GetElement(y,x);
			}
		}
	}
	m_tensors = NULL;	
}
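For a single grayscale channel, the accumulation above amounts to computing the three distinct entries of the 2x2 structure tensor (t0 = sum dx*dx, t1 = sum dy*dy, t2 = sum dx*dy). A minimal single-scale sketch with the plain OpenCV C API, substituting cvSobel for this class's centdiffX/centdiffY helpers (an assumption), could look like this:

// Sketch: single-scale structure tensor entries for one grayscale image.
// t0, t1, t2 are preallocated CV_32FC1 CvMats of the same size; cvSobel
// stands in for the central-difference helpers used above.
void StructureTensorSketch(const IplImage *gray, CvMat *t0, CvMat *t1, CvMat *t2)
{
	CvSize sz = cvGetSize(gray);
	IplImage *f  = cvCreateImage(sz, IPL_DEPTH_32F, 1);
	IplImage *dx = cvCreateImage(sz, IPL_DEPTH_32F, 1);
	IplImage *dy = cvCreateImage(sz, IPL_DEPTH_32F, 1);

	cvConvertScale(gray, f, 1.0, 0.0);   // work in floating point
	cvSobel(f, dx, 1, 0, 3);             // d/dx
	cvSobel(f, dy, 0, 1, 3);             // d/dy

	cvMul(dx, dx, t0, 1.0);              // t0 = dx*dx
	cvMul(dy, dy, t1, 1.0);              // t1 = dy*dy
	cvMul(dx, dy, t2, 1.0);              // t2 = dx*dy

	cvReleaseImage(&f);
	cvReleaseImage(&dx);
	cvReleaseImage(&dy);
}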
Exemplo n.º 11
0
Tensor::~Tensor()
{
	if (m_tensor != NULL)
	{
		for (int i=0;i<m_dim;i++)
		{
			if (m_tensor[i] != NULL)
			{
				delete m_tensor[i];
				m_tensor[i] = NULL;
			}
		}	
		delete [] m_tensor;
		m_tensor = NULL;
	}

	if (m_pImageTensorRGB != NULL)
	{
		for (int i=0;i<m_levels;i++)
		{
			if (m_pImageTensorRGB[i] != NULL)
			{
				delete m_pImageTensorRGB[i];
				m_pImageTensorRGB[i] = NULL;
			}
		}	
		delete [] m_pImageTensorRGB;
		m_pImageTensorRGB = NULL;
	}

	if (m_gradient != NULL)
	{
		for (int i=0;i<m_grad_dim;i++)
		{
			if (m_gradient[i] != NULL)
			{
				delete m_gradient[i];
				m_gradient[i] = NULL;
			}
		}	
		delete [] m_gradient;
		m_gradient = NULL;
	}


	if (m_tensors != NULL)
	{
		unsigned int x,y;
		for (y = 0; y < m_h; y++)
		{
			for (x = 0; x < m_w; x++)
			{
				if ((*m_tensors)(x,y) != NULL)
				{
					delete (*m_tensors)(x,y);
					(*m_tensors)(x,y) = NULL;
				}
			}
		}
		delete m_tensors;
		m_tensors = NULL;
	}
	if (m_img!=NULL)
	{
		cvReleaseImage(&m_img);
		m_img=NULL;
	}
}
Exemplo n.º 12
0
COpenCVMFCDoc::~COpenCVMFCDoc()
{
	// Release Image
	if (pImg)
		cvReleaseImage(&pImg);
}
Exemplo n.º 13
0
static IplImage *_threshold(IplImage *in) {
    IplImage *img = cvCreateImage(cvGetSize(in), 8, 1);

    // convert to grayscale
    cvCvtColor(in, img, CV_BGR2GRAY);

    // compute the mean intensity. This is used to adjust constant_reduction value below.
    long total = 0;
    for (int x = 0; x < img->width; ++x) {
        for (int y = 0; y < img->height; ++y) {
            CvScalar s = cvGet2D(img, y, x);
            total += s.val[0];
        }
    }
    int mean_intensity = (int)(total / (img->width * img->height));

    // apply thresholding (converts it to a binary image)
    // block_size observations: higher value does better for images with variable lighting (e.g.
    //   shadows).
    // may eventually need to parameterize this, to some extent, because the different callers
    //   seem to do better with different values (e.g. contour location is better with smaller numbers,
    //   but cage location is better with larger...) but for now, have been able to settle on value
    //   which works pretty well for most cases.
    int block_size = (int)(img->width / 9);
    if ((block_size % 2) == 0) {
        // must be odd
        block_size += 1;
    }
    // constant_reduction observations: magic, but adapting this value to the mean intensity of the
    //   image as a whole seems to help.
    int constant_reduction = (int)(mean_intensity / 3.6 + 0.5);

    IplImage *threshold_image = cvCreateImage(cvGetSize(img), 8, 1);
    cvAdaptiveThreshold(img, threshold_image, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV,
        block_size, constant_reduction);
    cvReleaseImage(&img);

    // try to get rid of "noise" spots.
    int min_blob_size = 2;
    for (int x = 0; x < threshold_image->width; ++x) {
        for (int y = 0; y < threshold_image->height; ++y) {
            CvScalar s = cvGet2D(threshold_image, y, x);
            int ink_neighbors = 0;
            if (s.val[0] == 255) {
                for (int dx = -1; dx <= 1; ++dx) {
                    if ((x + dx >= 0) && (x + dx < threshold_image->width)) {
                        for (int dy = -1; dy <= 1; ++dy) {
                            if ((y + dy >= 0) && (y + dy < threshold_image->height)) {
                                if (! ((dy == 0) && (dx == 0))) {
                                    CvScalar m = cvGet2D(threshold_image, y + dy, x + dx);
                                    if (m.val[0] == 255) {
                                        ++ink_neighbors;
                                        if (ink_neighbors > min_blob_size) {
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                        if (ink_neighbors > min_blob_size) {
                            break;
                        }
                    }
                }
                if (ink_neighbors <= min_blob_size) {
                    s.val[0] = 0;
                    cvSet2D(threshold_image, y, x, s);
                }
            }
        }
    }

    return threshold_image;
}
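The neighbour-counting pass above removes isolated specks exactly as described; a rougher but far shorter alternative, sketched here (it is not what this example does and is not pixel-identical), is a morphological open:

// Sketch: suppress small noise specks with erode-then-dilate (a
// morphological open), using the default 3x3 structuring element.
IplImage *denoised = cvCreateImage(cvGetSize(threshold_image), 8, 1);
cvErode(threshold_image, denoised, NULL, 1);
cvDilate(denoised, denoised, NULL, 1);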
Exemplo n.º 14
0
const CvPoint2D32f* locate_puzzle(IplImage *in, IplImage **annotated) {
    IplImage *grid_image = _grid(in);

    *annotated = cvCloneImage(in);

    // find lines using Hough transform
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* lines = 0;

    double distance_resolution = 1;
    double angle_resolution    = CV_PI / 60;
    int threshold              = 60;
    int minimum_line_length    = in->width / 2;
    int maximum_join_gap       = in->width / 10;
    lines = cvHoughLines2(grid_image, storage, CV_HOUGH_PROBABILISTIC,  distance_resolution, angle_resolution, threshold, minimum_line_length, maximum_join_gap);

    cvCvtColor(grid_image, *annotated, CV_GRAY2RGB);

    cvReleaseImage(&grid_image);

    double most_horizontal = INFINITY;
    for (int i = 0; i < lines->total; ++i) {
        CvPoint *line = (CvPoint*)cvGetSeqElem(lines,i);
        double dx     = abs(line[1].x - line[0].x);
        double dy     = abs(line[1].y - line[0].y);

        double slope = INFINITY;
        if (dx != 0) {
            slope = dy / dx;
        }
        if (slope != INFINITY) {
            if (slope < most_horizontal) {
                //printf("most horizontal seen: %0.2f\n", slope);
                most_horizontal = slope;
            }
        }
    }

    int top    = -1;
    int left   = -1;
    int bottom = -1;
    int right  = -1;
    for (int i = 0; i < lines->total; i++) {
        CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
        double dx     = abs(line[1].x - line[0].x);
        double dy     = abs(line[1].y - line[0].y);
        double slope  = INFINITY;
        if (dx) {
            slope = dy / dx;
        }

        cvLine(*annotated, line[0], line[1], CV_RGB(255, 0, 0), 1, 8, 0);
        if (fabs(slope - most_horizontal) <= 1) {
            if ((top == -1) || (line[1].y < ((CvPoint*)cvGetSeqElem(lines,top))[1].y)) {
                top = i;
            }
            if ((bottom == -1) || (line[1].y > ((CvPoint*)cvGetSeqElem(lines,bottom))[1].y)) {
                bottom = i;
            }
        } else {
            if ((left == -1) || (line[1].x < ((CvPoint*)cvGetSeqElem(lines,left))[1].x)) {
                left = i;
            }
            if ((right == -1) || (line[1].x > ((CvPoint*)cvGetSeqElem(lines,right))[1].x)) {
                right = i;
            }
        }
    }
    //printf("number of lines: %d\n", lines->total);
    if ((top == -1) || (left == -1) || (bottom == -1) || (right == -1)) {
        return NULL;
    }

    CvPoint *top_line    = (CvPoint*)cvGetSeqElem(lines,top);
    cvLine(*annotated, top_line[0], top_line[1], CV_RGB(0, 0, 255), 6, 8, 0);

    CvPoint *bottom_line = (CvPoint*)cvGetSeqElem(lines,bottom);
    cvLine(*annotated, bottom_line[0], bottom_line[1], CV_RGB(0, 255, 255), 6, 8, 0);

    CvPoint *left_line   = (CvPoint*)cvGetSeqElem(lines,left);
    cvLine(*annotated, left_line[0], left_line[1], CV_RGB(0, 255, 0), 6, 8, 0);

    CvPoint *right_line  = (CvPoint*)cvGetSeqElem(lines,right);
    cvLine(*annotated, right_line[0], right_line[1], CV_RGB(255, 255, 0), 6, 8, 0);

    CvPoint2D32f *coordinates;
    coordinates = malloc(sizeof(CvPoint2D32f) * 4);

    // top left
    intersect(top_line, left_line, &(coordinates[0]));
    cvLine(*annotated, cvPointFrom32f(coordinates[0]), cvPointFrom32f(coordinates[0]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("top_left: %.0f, %.0f\n", coordinates[0].x, coordinates[0].y);

    // top right
    intersect(top_line, right_line, &(coordinates[1]));
    cvLine(*annotated, cvPointFrom32f(coordinates[1]), cvPointFrom32f(coordinates[1]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("top_right: %.0f, %.0f\n", coordinates[1].x, coordinates[1].y);

    // bottom right
    intersect(bottom_line, right_line, &(coordinates[2]));
    cvLine(*annotated, cvPointFrom32f(coordinates[2]), cvPointFrom32f(coordinates[2]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("bottom_right: %.0f, %.0f\n", coordinates[2].x, coordinates[2].y);

    // bottom left
    intersect(bottom_line, left_line, &(coordinates[3]));
    cvLine(*annotated, cvPointFrom32f(coordinates[3]), cvPointFrom32f(coordinates[3]), CV_RGB(255, 255, 0), 10, 8, 0);

    //printf("bottom_left: %.0f, %.0f\n", coordinates[3].x, coordinates[3].y);

    return coordinates;
}
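intersect() is not part of this excerpt; a plausible implementation (a sketch, not necessarily this example's version) treats each CvPoint pair as an infinite line and solves with determinants:

// Hypothetical intersect(): each line is an array of two CvPoints;
// writes the crossing point of the two infinite lines through them.
static int intersect(const CvPoint *a, const CvPoint *b, CvPoint2D32f *out) {
    double x1 = a[0].x, y1 = a[0].y, x2 = a[1].x, y2 = a[1].y;
    double x3 = b[0].x, y3 = b[0].y, x4 = b[1].x, y4 = b[1].y;
    double den = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
    if (den == 0) return 0;                    // lines are parallel
    double p = x1 * y2 - y1 * x2;              // cross product of line a's endpoints
    double q = x3 * y4 - y3 * x4;              // cross product of line b's endpoints
    out->x = (float)((p * (x3 - x4) - (x1 - x2) * q) / den);
    out->y = (float)((p * (y3 - y4) - (y1 - y2) * q) / den);
    return 1;
}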
Exemplo n.º 15
0
 void Capture::setPreviousFrame(IplImage* image) {
     if (previousFrame != NULL) cvReleaseImage(&previousFrame);
     previousFrame = image;
 }
Exemplo n.º 16
0
	//handler for adding a rectangle region
	void CRecognitionTextView::OnAdd()
	{
		//work out the actual position
		int up = int((downPoint.y+m_nVScrollPos) * resizeY);
		int down = int((upPoint.y+m_nVScrollPos) * resizeY);
		int left = int(downPoint.x - xSt)* resizeX;
		int right = int(upPoint.x - xSt) * resizeX;

		int new_width = right - left;
		int new_height = down - up;

		//decide whether to accept this region for processing
		if(new_width*new_height < 9)
			return;
		if(lines.size() <= 0)
			return;
		//fetch the region at that position

		IplImage *newSrc = cvCreateImage(cvSize(new_width,new_height),8,1);
		CopyData(src,newSrc,left,up);

		//cvSmooth(newSrc,newSrc);
		tools.Normal(newSrc);
		cvSmooth(newSrc,newSrc);
		//extract the edges
		IplImage *edge = cvCreateImage(cvSize(new_width,new_height),8,1);
		tools.getEdge(newSrc,edge);

		//binarise
		IplImage *throld = cvCreateImage(cvSize(new_width,new_height),8,1);
		tools.OtsuTheld(edge,throld);

		/*cvShowImage("",throld);
		cvWaitKey();*/

		int xSt = new_width,xEn = 0;
		int ySt = new_height,yEn = 0;

		for(int i = 2 ;i < throld->width -2 ;i++)
		{
			for(int j = 2;j < throld->height - 2;j++)
			{
				if((uchar)throld->imageData[j*throld->widthStep + i] == 0) 
				{
					if(i < xSt)
						xSt = i;

					if(i > xEn)
						xEn = i;

					if(j < ySt)
						ySt = j;

					if(j > yEn)
						yEn = j;
				}
			}
		}

		/*for(int i = xSt;i < xEn ;i++)
		{
			for(int j = ySt; j< yEn ;j++)
			{
				newSrc->imageData[j*newSrc->widthStep + i] = 0;
			}
		}*/

		struct OutLine tempoutline;
		tempoutline.xSt = xSt + left;
		tempoutline.xEnd = xEn + left;
		tempoutline.ySt = ySt + up;
		tempoutline.yEnd = yEn + up;
		//work out which text line it belongs to
		int cent = (ySt + yEn)/2 + up;
		int temp = 1000;
		int mark;

		for(int i = 0;i<lines.size();i++)
		{
			if(abs(cent - lines[i]) < temp)
			{
				temp = abs(cent - lines[i]);
				mark = i;
			}
		}

		//insert it into that line
		bool st = false;
		int tempCode  = 0;
		for(int i = mark;i < outlineSs.size();i++)
		{
			for(int j = 0; j< outlineSs.at(i).size();j++)
			{
				if(outlineSs.at(i).at(j).Code > tempCode)
					tempCode = outlineSs.at(i).at(j).Code;

				if(!st && i== mark && j == outlineSs.at(i).size()-1)
				{
					tempoutline.Code = tempCode;

					outlineSs.at(i).push_back(tempoutline);

					st = true;

					j++;
				}

				if(outlineSs.at(i).at(j).Code == -1)
					continue;

				if(!st)
				{
					if(outlineSs.at(i).at(j).xSt > tempoutline.xSt)
					{
						tempoutline.Code = outlineSs.at(i).at(j).Code;
						//outlineSs.at(i).at(j).Code ++;
					//insert the element
						outlineSs.at(i).insert(outlineSs.at(i).begin()+j,tempoutline);
						st = true;
					}
				}
				else
				{
					outlineSs.at(i).at(j).Code++;
				}
			}
		}


		cvReleaseImage(&newSrc);
		cvReleaseImage(&edge);
		cvReleaseImage(&throld);

		OnPaint();
	}
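CopyData() is not shown in this excerpt; the same crop can be expressed with an image ROI (a sketch under that assumption):

// Hypothetical CopyData(): copy a dst-sized window of src starting
// at (left, up) into dst, using OpenCV's region-of-interest support.
void CopyData(IplImage *src, IplImage *dst, int left, int up)
{
	cvSetImageROI(src, cvRect(left, up, dst->width, dst->height));
	cvCopy(src, dst, NULL);
	cvResetImageROI(src);
}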
Exemplo n.º 17
0
IplImage* Panoramic::StitchFace(const char *leftName, const char *centerName,const char *rightName)
{
	IplImage   *leftHsvImg,*centerHsvImg,*rightHsvImg;
	vector<Coordinate> leftCoord;
	vector<Coordinate> rightCoord;
	vector<Coordinate> centerCoord;
	vector<Coordinate> profileCoord(3);
	vector<Coordinate> centerAffineCoord(3);
	IplImage   *leftAffineImg     = cvCreateImage(cvSize(m_width,m_height),8,1);
	IplImage   *rightAffineImg    = cvCreateImage(cvSize(m_width,m_height),8,1);
	IplImage   *leftFeatureImg    = cvLoadImage(leftName,1);
	IplImage   *centerFeatureImg  = cvLoadImage(centerName,1);
	IplImage   *rightFeatureImg   = cvLoadImage(rightName,1); 
	cvZero(rightAffineImg);
	cvZero(leftAffineImg);

	//Using red color threshold to find the features from input image
	leftHsvImg  = GetHsvFeature(leftFeatureImg   ,0,255,255,51,51,51);
	centerHsvImg= GetHsvFeature(centerFeatureImg ,0,255,255,51,51,51);
	rightHsvImg = GetHsvFeature(rightFeatureImg  ,0,255,255,51,51,51);
	//FindFeatureCoord will decide whether it continues or not.
    leftCoord   =  FindFeatureCoord(leftHsvImg);
	rightCoord  =  FindFeatureCoord(rightHsvImg);
	centerCoord =  FindFeatureCoord(centerHsvImg);
	
	if(m_do_sttich)//true when every view yields all 12 feature coordinates; decided in FindFeatureCoord
	{
		RearrangeCoord(leftCoord);
		RearrangeCoord(rightCoord);
		RearrangeCoord(centerCoord);

		for(int i = 0; i < m_numFeature; i++) 
		{
			m_centerCood[i] = centerCoord[i];
		}
		if(m_debug)
		{
			ShowFeature(leftCoord);
			ShowFeature(centerCoord);
			ShowFeature(rightCoord);
		}
	
		Graphic FindLine;

		for(int numStitch = 0; numStitch < 2;numStitch++)
		{
			for(int num = 0;num < 3;num++)
			{
				if(numStitch == 1)
				{
					if(num==0)
					{
						profileCoord[0]         = leftCoord[1];
						centerAffineCoord[0]    = centerCoord[1];
					}
					else
					{
						profileCoord[num]       = leftCoord[num+2];
						centerAffineCoord[num]  = centerCoord[num+2];
					}
				}

				else
				{
					if(num==0)
					{
						profileCoord[0]			= rightCoord [7];
						centerAffineCoord[0]	= centerCoord[7];
					}
					else
					{
						profileCoord[num]       = rightCoord [num+8];
						centerAffineCoord[num]  = centerCoord[num+8];
					}
				}
			}
			//stitch the left profile face
			if(numStitch == 1)
			{
				FindLine.Line(centerAffineCoord,0,centerAffineCoord,2,m_slope,m_intercept);
				DoAffineTrsform(m_leftImg,leftAffineImg,profileCoord,centerAffineCoord);
				if(m_debug)
				{
					cvNamedWindow("leftAffineImg",0);
					cvShowImage("leftAffineImg",leftAffineImg);
				}
				ShowStitch(leftAffineImg,m_centerImg); //stitch the profile face and apply linear blurring
			}
			//stitch the right profile face
			else
			{
				FindLine.Line(centerAffineCoord,0,centerAffineCoord,2,m_slope,m_intercept);
				DoAffineTrsform(m_rightImg,rightAffineImg,profileCoord,centerAffineCoord);
				if(m_debug)
				{
					cvNamedWindow("rightAffineImg",0);
					cvShowImage("rightAffineImg",rightAffineImg);
				}
				ShowStitch(rightAffineImg,m_centerImg);
			}
				m_saveSlope[numStitch]		=	m_slope;
				m_saveIntercept[numStitch]	=	m_intercept;
		
		}

		//stitch the frontal face
		for(int j = 0;j < m_height;j++)
		{
			for(int i = 0;i < m_width;i++)
			{
				double linePostionright = m_saveSlope[0]*i + m_saveIntercept[0]-j;
				double linePostionleft  = m_saveSlope[1]*i + m_saveIntercept[1]-j;

				if(linePostionright > m_lineT && linePostionleft > m_lineT)
				{
					double pixel = cvGetReal2D(m_centerImg,j,i);
					cvSetReal2D(m_PanoramicFace,j,i,pixel) ;
				}
			}
		}
		if(m_debug)
		{
			cvNamedWindow("PanoramicFace",0);
			cvShowImage("PanoramicFace",m_PanoramicFace);
			cvWaitKey(0);
		}
		cvReleaseImage(&leftHsvImg);	cvReleaseImage(&centerHsvImg);		cvReleaseImage(&rightHsvImg);
		cvReleaseImage(&leftAffineImg);	cvReleaseImage(&rightAffineImg);
		cvReleaseImage(&leftFeatureImg);cvReleaseImage(&centerFeatureImg);	cvReleaseImage(&rightFeatureImg);
		return m_PanoramicFace;
	}
	else
	{
		printf("Error when stich image....");
		return NULL;
	}
	
}
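DoAffineTrsform() maps three profile-face feature points onto the corresponding centre-face points; in the OpenCV C API that is cvGetAffineTransform followed by cvWarpAffine. A sketch under that assumption (the Coordinate type's x/y fields are also assumed):

// Sketch of a 3-point affine warp as used above (hypothetical helper).
void DoAffineTrsform(IplImage *src, IplImage *dst,
                     const vector<Coordinate> &from, const vector<Coordinate> &to)
{
	CvPoint2D32f srcPts[3], dstPts[3];
	for (int i = 0; i < 3; i++) {
		srcPts[i] = cvPoint2D32f(from[i].x, from[i].y);  // assumes Coordinate{x,y}
		dstPts[i] = cvPoint2D32f(to[i].x,   to[i].y);
	}
	CvMat *map = cvCreateMat(2, 3, CV_32FC1);
	cvGetAffineTransform(srcPts, dstPts, map);           // solve the 2x3 affine matrix
	cvWarpAffine(src, dst, map, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
	cvReleaseMat(&map);
}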
Exemplo n.º 18
0
int main( int argc, char** argv )
{
	IplImage *tpl = 0;
	IplImage *ref = 0;
	IplImage *poc = 0;
	char String[255];
	
	if( argc < 3 ) {
		fprintf( stderr, "Usage: phase_correlation <url1> <url2>\n" );
		return 1;	
	}
	sprintf(String, "wget %s -O image1.jpg", argv[1]);
        system(String);
	sprintf(String, "wget %s -O image2.jpg", argv[2]);
        system(String);	

	/* load reference image */
	ref = cvLoadImage( "image1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
	
	/* always check */
	if( ref == 0 ) {
		fprintf( stderr, "Cannot load %s!\n", argv[1] );
		return 1;	
	}
	
	/* load template image */
	tpl = cvLoadImage( "image2.jpg", CV_LOAD_IMAGE_GRAYSCALE );
	
	/* always check */
	if( tpl == 0 ) {
		fprintf( stderr, "Cannot load %s!\n", argv[2] );
		return 1;	
	}
	
	/* both images' size should be equal */
	if( ( tpl->width != ref->width ) || ( tpl->height != ref->height ) ) {
		fprintf( stderr, "Both images must have equal width and height!\n" );
		return 1;
	}
	
	/* create a new image, to store phase correlation result */
	poc = cvCreateImage( cvSize( tpl->width, tpl->height ), IPL_DEPTH_64F, 1 );
	
	/* get phase correlation of input images */
	phase_correlation( ref, tpl, poc );
	
	/* find the maximum value and its location */
    CvPoint minloc, maxloc;
	double  minval, maxval;
	cvMinMaxLoc( poc, &minval, &maxval, &minloc, &maxloc, 0 );
	
	/* print it */
	fprintf( stdout, "Maxval at (%d, %d) = %2.4f\n", maxloc.x, maxloc.y, maxval );
        fprintf(stdout,"percentage comparision= %2.4f \n",maxval*100);
	
	/* display images and free memory */
	cvNamedWindow( "tpl", CV_WINDOW_AUTOSIZE );
	cvNamedWindow( "ref", CV_WINDOW_AUTOSIZE );	
	
	cvShowImage( "tpl", tpl );
	cvShowImage( "ref", ref );
	
	cvWaitKey( 0 );
	
	cvDestroyWindow( "tpl" );
	cvDestroyWindow( "ref" );	
	
	cvReleaseImage( &tpl );
	cvReleaseImage( &ref );
	cvReleaseImage( &poc );
	
	return 0;
}
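phase_correlation() itself is not included here; one common implementation, sketched below under that assumption, forms the normalised cross-power spectrum R = F_ref * conj(F_tpl) / |F_ref * conj(F_tpl)| and takes its inverse DFT:

/* Sketch of a classic phase-correlation routine: ref and tpl are 8-bit
   single-channel images of equal size, poc is IPL_DEPTH_64F as in main(). */
void phase_correlation( IplImage *ref, IplImage *tpl, IplImage *poc )
{
	CvSize sz = cvGetSize( ref );
	IplImage *re  = cvCreateImage( sz, IPL_DEPTH_64F, 1 );
	IplImage *im  = cvCreateImage( sz, IPL_DEPTH_64F, 1 );
	IplImage *mag = cvCreateImage( sz, IPL_DEPTH_64F, 1 );
	IplImage *tmp = cvCreateImage( sz, IPL_DEPTH_64F, 1 );
	IplImage *fa  = cvCreateImage( sz, IPL_DEPTH_64F, 2 );
	IplImage *fb  = cvCreateImage( sz, IPL_DEPTH_64F, 2 );

	/* pack each real image into a complex buffer (imaginary part = 0) */
	cvZero( im );
	cvConvertScale( ref, re, 1.0, 0.0 );
	cvMerge( re, im, NULL, NULL, fa );
	cvConvertScale( tpl, re, 1.0, 0.0 );
	cvMerge( re, im, NULL, NULL, fb );

	cvDFT( fa, fa, CV_DXT_FORWARD, 0 );
	cvDFT( fb, fb, CV_DXT_FORWARD, 0 );

	/* cross-power spectrum: F_ref * conj(F_tpl) */
	cvMulSpectrums( fa, fb, fa, CV_DXT_MUL_CONJ );

	/* keep only the phase: divide through by the magnitude */
	cvSplit( fa, re, im, NULL, NULL );
	cvMul( re, re, mag, 1.0 );
	cvMul( im, im, tmp, 1.0 );
	cvAdd( mag, tmp, mag, NULL );
	cvPow( mag, mag, 0.5 );
	cvAddS( mag, cvScalarAll(1e-12), mag, NULL ); /* avoid division by zero */
	cvDiv( re, mag, re, 1.0 );
	cvDiv( im, mag, im, 1.0 );
	cvMerge( re, im, NULL, NULL, fa );

	/* the inverse DFT of the phase spectrum is the correlation surface */
	cvDFT( fa, fa, CV_DXT_INV_SCALE, 0 );
	cvSplit( fa, poc, NULL, NULL, NULL );

	cvReleaseImage( &re );  cvReleaseImage( &im );
	cvReleaseImage( &mag ); cvReleaseImage( &tmp );
	cvReleaseImage( &fa );  cvReleaseImage( &fb );
}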
Exemplo n.º 19
0
int main(int argc, const char **argv)
{
    //Variables
    int degrees,PosRelX,PosRelY;
    float radians,Dlaser,ODM_ang, ang;
    int width = 500, height = 500; //Create the size of the map here (in pixel)
    int centroX = (width / 2);
    int centroY = (height / 2);
    playerc_client_t *client;
    playerc_laser_t *laser;
    playerc_position2d_t *position2d;
    CvPoint pt,pt1,pt2;
    CvScalar cinzaE,preto,cinzaC;
    char window_name[] = "Map";

    IplImage* image = cvCreateImage( cvSize(width,height), 8, 3 );
    cvNamedWindow(window_name, 1 );
    preto = CV_RGB(0, 0, 0);        //for indicating obstacles
    cinzaE = CV_RGB(92, 92, 92);    //to indicate unknown space
    cinzaC = CV_RGB(150, 150, 150); //To indicate free spaces

    client = playerc_client_create(NULL, "localhost", 6665);
    if (playerc_client_connect(client) != 0)
    return -1;

    laser = playerc_laser_create(client, 0);
    if (playerc_laser_subscribe(laser, PLAYERC_OPEN_MODE))
    return -1;

    position2d = playerc_position2d_create(client, 0);
    if (playerc_position2d_subscribe(position2d, PLAYERC_OPEN_MODE) != 0) {
        fprintf(stderr, "error: %s\n", playerc_error_str());
        return -1;
    }

    if (playerc_client_datamode (client, PLAYERC_DATAMODE_PULL) != 0) {
        fprintf(stderr, "error: %s\n", playerc_error_str());
        return -1;
    }

    if (playerc_client_set_replace_rule (client, -1, -1, PLAYER_MSGTYPE_DATA, -1, 1) != 0) {
        fprintf(stderr, "error: %s\n", playerc_error_str());
        return -1;
    }

    playerc_position2d_enable(position2d, 1);  // initialise motors
    playerc_position2d_set_odom(position2d, 0, 0, 0);  // Set odometer to zero

    cvSet(image, cinzaE, 0); //fill the map with the "unknown" grey
    pt.x = centroX;  // Zero coordinate for x
    pt.y = centroY;  // Zero coordinate for y


    while(1) {
        playerc_client_read(client);
        cvSaveImage("mapa.jpg",image);
        playerc_client_read(client);

        for (degrees = 2; degrees <= 360; degrees+=2) {
            Dlaser = laser->scan[degrees][0];
            if (Dlaser < 8) {
                radians = graus2rad (degrees/2);      //Convert the angle of the laser to radians
                ODM_ang = position2d->pa;             //Obtain the angle relative to the robot
                ang = ((1.5*PI)+radians+ODM_ang);     //Convert the angle to world coordinates
                PosRelX = arredonda(position2d->px);  //Position x relative to robot
                PosRelY = arredonda(position2d->py);  //Position y relative to robot
                pt1.y = (centroY-PosRelY);            //Global y coordinate of the robot
                pt1.x = (centroX+PosRelX);            //Global x coordinate of the robot

                //convert polar coordinates to rectangular (global)
                pt.y = (int)(pt1.y-(sin(ang)*Dlaser*10));
                pt.x = (int)(pt1.x+(cos(ang)*Dlaser*10));

                //draw the free area with cvLine
                cvLine(image, pt1,pt,cinzaC, 1,4,0);

                //marks the object in the map
                cvLine(image, pt,pt,preto, 1,4,0);

                //show the resulting map on screen
                cvShowImage(window_name, image );
                cvWaitKey(10);
            }
        }
    }

    //Disconnect player
    playerc_laser_unsubscribe(laser);
    playerc_laser_destroy(laser);
    playerc_client_disconnect(client);
    playerc_client_destroy(client);

    //Destroy the OpenCV window cvReleaseImage
    cvReleaseImage(&image);
    cvDestroyWindow(window_name);
    return 0;
}
Exemplo n.º 20
0
/*
 * This function overlays the illumination frame translucently
 * over the original image.
 * It also draws the worm's boundary and the worm's head and tail.
 *
 */
void DisplayWormHUDS(WormAnalysisData* Worm, WormAnalysisParam* Params, Frame* IlluminationFrame,char* WindowName){
	IplImage* TempImage =cvCreateImage(Worm->SizeOfImage,IPL_DEPTH_8U,1);
	CreateWormHUDS(TempImage,Worm,Params,IlluminationFrame);
	cvShowImage(WindowName,TempImage);
	cvReleaseImage(&TempImage);
}
Exemplo n.º 21
0
int calc_hsv_colors(IplImage *frame)
{
    if(!frame)
        return -1;

    IplImage* image=0, *hsv=0, *dst=0, *dst2=0, *color_indexes=0, *dst3=0;

    image = cvCloneImage(frame);
    hsv = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
    cvCvtColor( image, hsv, CV_BGR2HSV );

    // to store the results
    dst = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
    dst2 = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
    color_indexes = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 1 ); // store color indexes

    // to store the RGB colours
    CvScalar rgb_colors[NUM_COLOR_TYPES];

    int i=0, j=0, x=0, y=0;

    // reset colors
    memset(colorCount, 0, sizeof(colorCount));
    for(i=0; i<NUM_COLOR_TYPES; i++) {
        rgb_colors[i] = cvScalarAll(0);
    }

    for (y=0; y<hsv->height; y++) {
        for (x=0; x<hsv->width; x++) {

            // get HSV pixel
            uchar H = CV_PIXEL(uchar, hsv, x, y)[0];	// Hue
            uchar S = CV_PIXEL(uchar, hsv, x, y)[1];	// Saturation
            uchar V = CV_PIXEL(uchar, hsv, x, y)[2];	// Value (Brightness)

            // define pixel color type
            int ctype = getPixelColorType(H, S, V);

            // set values
            CV_PIXEL(uchar, dst, x, y)[0] = cCTHue[ctype];	// Hue
            CV_PIXEL(uchar, dst, x, y)[1] = cCTSat[ctype];	// Saturation
            CV_PIXEL(uchar, dst, x, y)[2] = cCTVal[ctype];	// Value

            // collect RGB
            rgb_colors[ctype].val[0] += CV_PIXEL(uchar, image, x, y)[0]; // B
            rgb_colors[ctype].val[1] += CV_PIXEL(uchar, image, x, y)[1]; // G
            rgb_colors[ctype].val[2] += CV_PIXEL(uchar, image, x, y)[2]; // R

            // remember which colour type this pixel belongs to
            CV_PIXEL(uchar, color_indexes, x, y)[0] = ctype;

            // count it :)
            colorCount[ctype]++;
        }
    }

    // average the RGB components
    for(i=0; i<NUM_COLOR_TYPES; i++) {
        if(colorCount[i] == 0)
            continue; // colour never occurred; avoid division by zero
        rgb_colors[i].val[0] /= colorCount[i];
        rgb_colors[i].val[1] /= colorCount[i];
        rgb_colors[i].val[2] /= colorCount[i];
    }

    // now push the array into a vector and sort it :)
    std::vector< std::pair< int, uint > > colors;
    colors.reserve(NUM_COLOR_TYPES);

    for(i=0; i<NUM_COLOR_TYPES; i++){
        std::pair< int, uint > color;
        color.first = i;
        color.second = colorCount[i];
        colors.push_back( color );
    }
    // sort
    std::sort( colors.begin(), colors.end(), colors_sort );

    // for debugging: print the colour codes, names and counts
    for(i=0; i<colors.size(); i++){
        printf("[i] color %d (%s) - %d\n", colors[i].first, sCTypes[colors[i].first], colors[i].second );
    }

    // print the codes of the leading colours
    printf("[i] color code: \n");
    for(i=0; i<NUM_COLOR_TYPES; i++)
        printf("%02d ", colors[i].first);
    printf("\n");
    printf("[i] color names: \n");
    for(i=0; i<NUM_COLOR_TYPES; i++)
        printf("%s ", sCTypes[colors[i].first]);
    printf("\n");

#if 0
    cvSaveImage("image.bmp", image);
#endif

    cvReleaseImage(&image);
    cvReleaseImage(&hsv);
    cvReleaseImage(&dst);
    cvReleaseImage(&dst2);
    cvReleaseImage(&color_indexes);
    cvReleaseImage(&dst3);

    return 0;
}
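getPixelColorType() and the cCTHue/cCTSat/cCTVal lookup tables are defined elsewhere; the idea is a coarse HSV quantiser that first separates black/white/grey by saturation and value, then bins the hue. A hypothetical sketch (colour indices and thresholds are illustrative assumptions, not this example's values):

// Hypothetical HSV quantiser in the spirit of getPixelColorType().
// All colour indices and thresholds below are illustrative assumptions.
enum { cBLACK = 0, cWHITE, cGREY, cRED, cORANGE, cYELLOW,
       cGREEN, cAQUA, cBLUE, cPURPLE, cPINK };

int getPixelColorTypeSketch(uchar H, uchar S, uchar V)
{
    if (V < 64)             return cBLACK;                     // too dark
    if (S < 48)             return (V > 200) ? cWHITE : cGREY; // unsaturated
    if (H < 10 || H >= 170) return cRED;    // hue wraps around 180 in OpenCV
    if (H < 20)             return cORANGE;
    if (H < 35)             return cYELLOW;
    if (H < 85)             return cGREEN;
    if (H < 100)            return cAQUA;
    if (H < 130)            return cBLUE;
    if (H < 150)            return cPURPLE;
    return cPINK;
}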
Exemplo n.º 22
0
/*
 * Smooths, thresholds and finds the worms contour.
 * The original image must already be loaded into Worm.ImgOrig
 * The Smoothed image is deposited into Worm.ImgSmooth
 * The thresholded image is deposited into Worm.ImgThresh
 * The Boundary is placed in Worm.Boundary
 *
 */
void FindWormBoundary(WormAnalysisData* Worm, WormAnalysisParam* Params){
	/** This function currently takes around 5-7 ms **/
	/**
	 * Before I forget.. plan to make this faster by:
	 *  a) using region of interest
	 *  b) decimating to make it smaller (maybe?)
	 *  c) resize
	 *  d) not using CV_GAUSSIAN for smoothing
	 */

	/** Smooth the Image **/
	TICTOC::timer().tic("cvSmooth");
	cvSmooth(Worm->ImgOrig,Worm->ImgSmooth,CV_GAUSSIAN,Params->GaussSize*2+1);
	TICTOC::timer().toc("cvSmooth");

	/** Dilate and Erode **/
//	cvDilate(Worm->ImgSmooth, Worm->ImgSmooth,NULL,3);
//	cvErode(Worm->ImgSmooth, Worm->ImgSmooth,NULL,2);


	/** Threshold the Image **/
	TICTOC::timer().tic("cvThreshold");
	cvThreshold(Worm->ImgSmooth,Worm->ImgThresh,Params->BinThresh,255,CV_THRESH_BINARY );
	TICTOC::timer().toc("cvThreshold");


	/** Dilate and Erode **/
	if (Params->DilateErode==1){
		TICTOC::timer().tic("DilateAndErode");
		cvDilate(Worm->ImgThresh, Worm->ImgThresh,NULL,3);
		cvErode(Worm->ImgThresh, Worm->ImgThresh,NULL,2);
		TICTOC::timer().toc("DilateAndErode");
	}


	/** Find Contours **/
	CvSeq* contours;
	IplImage* TempImage=cvCreateImage(cvGetSize(Worm->ImgThresh),IPL_DEPTH_8U,1);
	cvCopy(Worm->ImgThresh,TempImage);
	TICTOC::timer().tic("cvFindContours");
	cvFindContours(TempImage,Worm->MemStorage, &contours,sizeof(CvContour),CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE,cvPoint(0,0));
	TICTOC::timer().toc("cvFindContours");

	CvSeq* rough;
	/** Find Longest Contour **/
	TICTOC::timer().tic("cvLongestContour");
	if (contours) LongestContour(contours,&rough);
	TICTOC::timer().toc("cvLongestContour");
	cvReleaseImage(&TempImage);

	/** Smooth the Boundary **/
	if (Params->BoundSmoothSize>0){
		TICTOC::timer().tic("SmoothBoundary");
		CvSeq* smooth=smoothPtSequence(rough,Params->BoundSmoothSize,Worm->MemStorage);
		Worm->Boundary=cvCloneSeq(smooth);
		TICTOC::timer().toc("SmoothBoundary");

	} else {
		Worm->Boundary=cvCloneSeq(rough);
	}



}
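LongestContour() is not shown; since cvFindContours with CV_RETR_EXTERNAL returns a list of top-level contours linked through h_next, a plausible implementation simply walks that list (a sketch):

// Hypothetical LongestContour(): pick the sequence with the most points
// from the h_next-linked list produced by cvFindContours.
void LongestContour(CvSeq *contours, CvSeq **longest)
{
	CvSeq *best = contours;
	for (CvSeq *c = contours; c != NULL; c = c->h_next)
		if (c->total > best->total)
			best = c;
	*longest = best;
}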
Exemplo n.º 23
0
/**
 * @brief Main entry point
 * @param argc The number of program arguments
 * @param argv The program's argument strings
 * @return Nothing if successful, or some negative number on failure
 */
int main( int argc, char** argv ) {
	
	if( argc < 4 )
		return -1;

	// Variable declarations
	gsl_rng *rng;
	IplImage *frame, *hsv_frame;
	histogram **ref_histos, *histo_aux;
	CvCapture *video;
	particle **particles, **aux, **nuevas_particulas;
	CvScalar color_rojo = CV_RGB(255,0,0), color_azul = CV_RGB(0,0,255);
	CvRect *regions;
	int num_objects = 0;
	int i = 1, MAX_OBJECTS = atoi(argv[3]), PARTICLES = atoi(argv[2]);
	FILE *datos;
	char name[45], num[3], *p1, *p2;
	clock_t t_ini, t_fin;
	double ms;
	
	video = cvCaptureFromFile( argv[1] );
	if( !video ) {
		printf("No se pudo abrir el fichero de video %s\n", argv[1]);
		exit(-1);
	}

	first_frame = cvQueryFrame( video );
	num_objects = get_regions( &regions,  MAX_OBJECTS, argv[1] );
	if( num_objects == 0 )
		exit(-1);

	t_ini = clock();
	hsv_frame = bgr2hsv( first_frame );
	histo_aux = (histogram*) malloc( sizeof(histogram) );
	histo_aux->n = NH*NS + NV;
	nuevas_particulas = (particle**) malloc( num_objects * sizeof( particle* ) );
	for( int j = 0; j < num_objects; ++j )
		nuevas_particulas[j] = (particle*) malloc( PARTICLES * sizeof( particle ) );
			
	// Compute the reference histograms and distribute the initial particles
	ref_histos = compute_ref_histos( hsv_frame, regions, num_objects );
	particles = init_distribution( regions, num_objects, PARTICLES );

	// Display the tracking
	if( show_tracking ) {

		// Display every particle
		if( show_all )
			for( int k = 0; k < num_objects; ++k )
				for( int j = 0; j < PARTICLES; ++j )
					display_particle( first_frame, particles[k][j], color_azul );

		// Draw each object's most promising particle
		for( int k = 0; k < num_objects; ++k )
			display_particle( first_frame, particles[k][0], color_rojo );

		cvNamedWindow( "Video", 1 );
		cvShowImage( "Video", first_frame );
		cvWaitKey( 5 );
	}

	// Export the reference histograms and the frames
	if( exportar ) {
		export_ref_histos( ref_histos, num_objects );
		export_frame( first_frame, 1 );

		for( int k = 0; k < num_objects; ++k ) {
			sprintf( num, "%02d", k );
			strcpy( name, REGION_BASE);
			p1 = strrchr( argv[1], '/' );
			p2 = strrchr( argv[1], '.' );
			strncat( name, (++p1), p2-p1 );
			strcat( name, num );
			strcat( name, ".txt" );
			datos = fopen( name, "a+" );
			if( ! datos ) {
				printf("Error creando fichero para datos\n");
				return -1;
			}
			fprintf( datos, "%d\t%f\t%f\n", 0, particles[k][0].x, particles[k][0].y );
			fclose( datos );
		}
	}

	cvReleaseImage( &hsv_frame );
	
	// Initialise the random number generator
	gsl_rng_env_setup();
	rng = gsl_rng_alloc( gsl_rng_mt19937 );
	gsl_rng_set(rng, (unsigned long) time(NULL));

	// Remember: frame must not be released because it comes from cvQueryFrame
	while( frame = cvQueryFrame( video ) ) {
		hsv_frame = bgr2hsv( frame );

		// Perform the prediction and likelihood measurement for each particle
		for( int k = 0; k < num_objects; ++k )
			for( int j = 0; j < PARTICLES; ++j ) {
				transition( &particles[k][j], frame->width, frame->height, rng );
				particles[k][j].w = likelihood( hsv_frame, &particles[k][j], ref_histos[k], histo_aux );
			}
			
		// Normalise the weights and resample into an unweighted particle set
		normalize_weights( particles, num_objects, PARTICLES );
		for (int k = 0; k < num_objects; ++k )
			resample( particles[k], PARTICLES, nuevas_particulas[k] );
		aux = particles;
		particles = nuevas_particulas;
		nuevas_particulas = aux;

		// Display the tracking
		if( show_tracking ) {

			// Display every particle
			if( show_all )
				for( int k = 0; k < num_objects; ++k )
					for( int j = 0; j < PARTICLES; ++j )
						display_particle( frame, particles[k][j], color_azul );
		
			// Draw each object's most promising particle
			for( int k = 0; k < num_objects; ++k )
				display_particle( frame, particles[k][0], color_rojo );
			cvNamedWindow( "Video", 1 );
			cvShowImage( "Video", frame );
			cvWaitKey( 5 );
		}

		// Export the reference histograms and the frames
		if( exportar ) {
			export_frame( frame, i+1 );

			for( int k = 0; k < num_objects; ++k ) {
				sprintf( num, "%02d", k );
				strcpy( name, REGION_BASE);
				p1 = strrchr( argv[1], '/' );
				p2 = strrchr( argv[1], '.' );
				strncat( name, (++p1), p2-p1 );
				strcat( name, num );
				strcat( name, ".txt" );
				datos = fopen( name, "a+" );
				if( ! datos ) {
					printf("Error abriendo fichero para datos\n");
					return -1;
				}
				fprintf( datos, "%d\t%f\t%f\n", i, particles[k][0].x, particles[k][0].y );
				fclose( datos );
			}
		}

		cvReleaseImage( &hsv_frame );
		++i;
	}
	
	// Free every resource used (mallocs, gsl and frames)
	cvReleaseCapture( &video );
	gsl_rng_free( rng );
	free( histo_aux );
	free( regions );

	for( int i = 0; i < num_objects; ++i ) {
		free( ref_histos[i] );
		free( particles[i] );
		free( nuevas_particulas[i] );
	}

	free( particles );
	free( nuevas_particulas );

	t_fin = clock();
	ms = ((double)(t_fin - t_ini) / CLOCKS_PER_SEC) * 1000.0;
	printf("%d\t%d\t%.10g\n", PARTICLES, num_objects, ms);
}
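normalize_weights() and resample() are defined elsewhere; normalisation just divides each particle's weight by the per-object sum so the weights form a probability distribution. A sketch assuming the particle struct's w field seen above:

// Sketch of normalize_weights(): make each object's particle weights sum to 1.
void normalize_weights( particle **particles, int num_objects, int num_particles )
{
	for( int k = 0; k < num_objects; ++k ) {
		double sum = 0.0;
		for( int j = 0; j < num_particles; ++j )
			sum += particles[k][j].w;
		if( sum > 0.0 )
			for( int j = 0; j < num_particles; ++j )
				particles[k][j].w /= sum;
	}
}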
Exemplo n.º 24
0
int main(int argc, char *argv[])
{
	IplImage *img = 0, *img2=0 ;
	int height,width,step,channels;
	int i,j,k;

	if(argc<4){
		printf("Usage: ./a.out <image-file-name> <watermarker image> <audio file>\n");
		exit(0);
	}

	// load an image  
	img=cvLoadImage(argv[1]);
	if(!img){
		printf("Could not load image file: %s\n",argv[1]);
		exit(0);
	}

	/// Load Watermark Image
	img2=cvLoadImage(argv[2]);
	if(!img2){
		printf("Could not load image file: %s\n",argv[2]);
		exit(0);
	}
	height    = img->height;
	width     = img->width;
	step      = img->widthStep;
	channels  = img->nChannels;
	int nchannels = img->nChannels;
	data      = (uchar *)img->imageData;
	int height2=img2->height;
	int width2=img2->width;
	int step2=img2->widthStep;
	int channels2=img2->nChannels;
	data2  = (uchar *)img2->imageData;
		
	///// Inserting Watermark
	insert_watermark(img,img2);

	//Read an audio file and write into the image
	FILE *fp=fopen(argv[3],"r");
	if(fp==NULL){
		printf("Could not load audio file: %s\n",argv[3]);
		exit(0);
	}
	
	insert_audio(fp,img,img2);


	//printf("%d row=%d col=%d \n",count1,row_count,col_count);

	// Extract the bits of the count of total bytes present in the audio file
	int a1,a2,a3,a4;
	a1=count1%256;
	a2=count1%65536;
	a3=count1%16777216;
	a4=count1%4294967296;
	//printf("%d %d %d %d\n",a1,(a2-a1)>>8,(a3-a2)>>16,(a4-a3)>>24);
	int size[4]={0};
	size[0]=a1;
	size[1]=(a2-a1)>>8;
	size[2]=(a3-a2)>>16;
	size[3]=(a4-a3)>>24;

	printf("bytes=%d\n", size[0] | (size[1]<<8) | (size[2]<<16) | (size[3]<<24));
	int val1,val2;

	//insert the audio header
	for(i=0;i<4;i++)
	{
		a1=size[i]%4;
		a2=size[i]%32;
		a3=size[i]%256;
		val1=(a2-a1)>>2;
		val2=(a3-a2)>>5;
		data[0+i*channels+0]= (data[0+i*channels+0] &252);
		data[0+i*channels+1]= (data[0+i*channels+1] &248);
		data[0+i*channels+2]= (data[0+i*channels+2] &248);
		data[0+i*channels+0]= (data[0+i*channels+0] |a1);
		data[0+i*channels+1]= (data[0+i*channels+1] |val1);
		data[0+i*channels+2]= (data[0+i*channels+2] |val2);

	}
	cvSaveImage("new_image.png", img );

	 // create a window
	cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE); 
	cvMoveWindow("mainWin", 100, 100);

	// show the image
	cvShowImage("mainWin", img );

	// wait for a key
	cvWaitKey(0);

	// release the image
	cvReleaseImage(&img );
	return 0;
}
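The header loop above packs each size byte as 2+3+3 low-order bits into the B, G and R channels of the first four pixels; a matching reader (a sketch, not part of the original) just reverses those masks:

// Sketch: recover the 32-bit audio byte count written by the 2+3+3-bit
// header loop above, reading the low bits of B, G, R of pixels 0..3.
int read_audio_size(const IplImage *img)
{
	const uchar *p = (const uchar*)img->imageData;
	int channels = img->nChannels, total = 0, i;
	for(i = 0; i < 4; i++)
	{
		int b = p[i*channels + 0] & 3;   // 2 bits
		int g = p[i*channels + 1] & 7;   // 3 bits
		int r = p[i*channels + 2] & 7;   // 3 bits
		total |= (b | (g << 2) | (r << 5)) << (8 * i);
	}
	return total;
}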
Exemplo n.º 25
0
int main(void)
{

	koki_camera_params_t params;

	params.size.x = WIDTH;
	params.size.y = HEIGHT;
	params.principal_point.x = params.size.x / 2;
	params.principal_point.y = params.size.y / 2;
	params.focal_length.x = 571.0;
	params.focal_length.y = 571.0;

	int fd = koki_v4l_open_cam("/dev/video0");
	struct v4l2_format fmt = koki_v4l_create_YUYV_format(WIDTH, HEIGHT);
	koki_v4l_set_format(fd, fmt);

	int num_buffers = 1;
	koki_buffer_t *buffers;
	buffers = koki_v4l_prepare_buffers(fd, &num_buffers);

	koki_v4l_start_stream(fd);

	while (1){

		uint8_t *yuyv = koki_v4l_get_frame_array(fd, buffers);
		IplImage *frame = koki_v4l_YUYV_frame_to_RGB_image(yuyv, WIDTH, HEIGHT);

		IplImage *thresholded;
		thresholded = koki_threshold_adaptive(frame, 5, 3,
						      KOKI_ADAPTIVE_MEAN);
		cvShowImage("thresh", thresholded);

		koki_labelled_image_t *l = koki_label_image(thresholded, 128);

		for (int i=0; i<l->clips->len; i++){

			if (!koki_label_useable(l, i))
				continue;

			GSList *contour = koki_contour_find(l, i);

			koki_quad_t *quad = koki_quad_find_vertices(contour);

			if (quad == NULL){
				koki_contour_free(contour);
				continue;
			}

			koki_contour_draw(frame, contour);
			koki_quad_refine_vertices(quad);
			koki_quad_draw(frame, quad);

			koki_marker_t *marker;
			marker = koki_marker_new(quad);

			if (koki_marker_recover_code(marker, frame)){

				koki_pose_estimate(marker, 0.11, &params);
				koki_bearing_estimate(marker);

				printf("marker code: %d\n", marker->code);

			}

			koki_contour_free(contour);
			koki_quad_free(quad);
			koki_marker_free(marker);

		}//for

		cvShowImage("frame", frame);
		cvWaitKey(1);

		koki_labelled_image_free(l);
		cvReleaseImage(&thresholded);
		cvReleaseImage(&frame);

	}

	return 0;

	cvDestroyWindow("frame");
	cvDestroyWindow("thresh");

}
Exemplo n.º 26
0
int main()
{
    int c;//to store ascii value of key pressed
    int i,j,h,s,v;
    CvCapture *capture=cvCreateCameraCapture(0);//initiate camera 
    //the frame is re-queried every iteration, so there is no need to wait for the camera: for a few ms the frame is black, then as the camera starts the frame updates and shows the image
    IplImage *frame;
    
    IplImage* outputred;
    IplImage* outputone;
    IplImage* outputtwo;
    // IplImage* outputblue;
    IplImage* outputwhite;
    // IplImage* outputorange;

    uchar *ptemp;
    // uchar *poutputorange;
    uchar *poutputred;
    uchar *poutputwhite;
    // uchar *poutputblue;
    uchar *poutputone;
    uchar *poutputtwo;
   
   
    if(capture!=NULL)
    {
                     frame=cvQueryFrame(capture);//grab the current camera image into the frame pointer
                     cvNamedWindow("img");
                     while(1)
                     {
                             
                             cvShowImage("img",frame);
                            frame=cvQueryFrame(capture);
                            temp=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
                            cvCvtColor(frame,temp,CV_BGR2HSV);
                            //frame-rate delay (without it the system would hang, since processing is very fast)
                           // cvNamedWindow("output",1);
                            //cvShowImage("output",temp);
                            cvSetMouseCallback("img", my_mouse_callback, NULL);
                            c=cvWaitKey(1);
                            outputred=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
                            outputone=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
                            outputtwo=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
                            //outputblue=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
                            outputwhite=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
                            //outputorange=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
    
    cvCvtColor(frame,temp,CV_BGR2HSV);
    ptemp  =  (uchar*)temp->imageData;
    poutputone  =  (uchar*)outputone->imageData;
    poutputtwo  =  (uchar*)outputtwo->imageData;
   // poutputblue  =  (uchar*)outputblue->imageData;
    poutputwhite  =  (uchar*)outputwhite->imageData;
    poutputred  =  (uchar*)outputred->imageData;
  //  poutputorange  =  (uchar*)outputorange->imageData;
    
    for(i=0;i<frame->height;i++)
        for(j=0;j<frame->width;j++)
        {
            h=ptemp[i*temp->widthStep + j*temp->nChannels+0];
            s=ptemp[i*temp->widthStep + j*temp->nChannels+1];
            v=ptemp[i*temp->widthStep + j*temp->nChannels+2];

            if((h>=157&&h<=178)&&s>=110&&s<=255&&v>=90)//red
                poutputred[i*outputred->widthStep + j]=255;
            else
                poutputred[i*outputred->widthStep + j]=0;

            if((h==0&&s==0&&v<150&&v>9)||(h>=25&&h<=110&&s>=20&&s<=200&&v>=13&&v<=120))//one
                poutputone[i*outputone->widthStep + j]=255;
            else
                poutputone[i*outputone->widthStep + j]=0;

            /*if((h>=145&&h<=160)&&s>=175&&s<=255&&v>=80&&v<=150)//one
                poutputone[i*outputone->widthStep + j]=255;
            else
                poutputone[i*outputone->widthStep + j]=0;*/

            if(h>=110&&h<=153&&s>=90&&v>=7&&v<=150)//two
                poutputtwo[i*outputtwo->widthStep + j]=255;
            else
                poutputtwo[i*outputtwo->widthStep + j]=0;

            if((h==0&&s==0&&v>=250)||((h>=0&&h<=30)&&s>=50&&s<=200&&v>=110))//white
                poutputwhite[i*outputwhite->widthStep + j]=255;
            else
                poutputwhite[i*outputwhite->widthStep + j]=0;
        }
     
    //cvNamedWindow("output",1);
    cvNamedWindow("outputred",1);
    cvNamedWindow("outputone",1);
    cvNamedWindow("outputtwo",1);
  //  cvNamedWindow("outputblue",1);
    cvNamedWindow("outputwhite",1);
    //cvNamedWindow("outputorange",1);

    //cvShowImage("output",temp);
    cvShowImage("outputred",outputred);
    cvShowImage("outputone",outputone);
    cvShowImage("outputtwo",outputtwo);
   // cvShowImage("outputblue",outputblue);
    cvShowImage("outputwhite",outputwhite);
   // cvShowImage("outputorange",outputorange);
    cvWaitKey(1);
 /*   imgOutred=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgred=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobsred;
    cvLabel(outputred, labelImgred, blobsred);
    cvRenderBlobs(labelImgred, blobsred, input, imgOutred);
    cvFilterByArea(blobsred,PIXEL_MIN,PIXEL_MAX);

    imgOutone=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgone=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobsone;
    cvLabel(outputone, labelImgone, blobsone);
    cvRenderBlobs(labelImgone, blobsone, input, imgOutone);
    cvFilterByArea(blobsone,PIXEL_MIN,PIXEL_MAX);

    imgOuttwo=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgtwo=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobstwo;
    cvLabel(outputtwo, labelImgtwo, blobstwo);
    cvRenderBlobs(labelImgtwo, blobstwo, input, imgOuttwo);
    cvFilterByArea(blobstwo,PIXEL_MIN,PIXEL_MAX);

    imgOutblue=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgblue=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobsblue;
    cvLabel(outputblue, labelImgblue, blobsblue);
    cvRenderBlobs(labelImgblue, blobsblue, input, imgOutblue);
    cvFilterByArea(blobsblue,PIXEL_MIN,PIXEL_MAX);
    
    imgOutwhite=cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,3);
    labelImgwhite=cvCreateImage(cvGetSize(input),IPL_DEPTH_LABEL,1);
    CvBlobs blobswhite;
    cvLabel(outputwhite, labelImgwhite, blobswhite);
    cvRenderBlobs(labelImgwhite, blobswhite, input, imgOutwhite);
    cvFilterByArea(blobswhite,PIXEL_MINWHITE,PIXEL_MAX);
    
   */ 
    
    
    
    
    cvReleaseImage( &outputred ); 
    cvReleaseImage( &outputone ); 
    cvReleaseImage( &outputtwo ); 
    //cvReleaseImage( &outputblue ); 
    cvReleaseImage( &outputwhite ); 
    //cvReleaseImage( &outputorange );
                            cvReleaseImage(&temp ); 
                            //cvDestroyWindow( "output" );
                            if(c>0&&c<255)
                            {
                                cvDestroyWindow( "img" );
                                //cvReleaseImage( &frame ); // frame is owned by capture; releasing it here would double-free
                                cvReleaseCapture(&capture);
                                //cvDestroyWindow( "outputred" );
                                //cvDestroyWindow( "output" );
                                //cvDestroyWindow( "outputone" );
                                //cvDestroyWindow( "outputtwo" );
                                //cvDestroyWindow( "outputblue" );
                                //cvDestroyWindow( "outputwhite" );
                                //cvDestroyWindow( "outputorange" );

                                cvReleaseImage( &outputred );
                                cvReleaseImage( &outputone );
                                cvReleaseImage( &outputtwo );
                                //cvReleaseImage( &outputblue );
                                cvReleaseImage( &outputwhite );
                                //cvReleaseImage( &outputorange );

                                return 0;
                            }
                     }
    }
}
Exemplo n.º 27
0
bool CvCaptureCAM_DC1394_v2_CPP::grabFrame()
{
    dc1394capture_policy_t policy = DC1394_CAPTURE_POLICY_WAIT;
    bool code = false, isColor;
    dc1394video_frame_t *dcFrame = 0, *fs = 0;
    int i, nch;

    if (!dcCam || (!started && !startCapture()))
        return false;

    dc1394_capture_dequeue(dcCam, policy, &dcFrame);

    if (!dcFrame)
        return false;

    if (/*dcFrame->frames_behind > 1 ||*/ dc1394_capture_is_frame_corrupt(dcCam, dcFrame) == DC1394_TRUE)
    {
        goto _exit_;
    }

    isColor = dcFrame->color_coding != DC1394_COLOR_CODING_MONO8 &&
              dcFrame->color_coding != DC1394_COLOR_CODING_MONO16 &&
              dcFrame->color_coding != DC1394_COLOR_CODING_MONO16S;

    if (nimages == 2)
    {
        fs = (dc1394video_frame_t*)calloc(1, sizeof(*fs));

        //dc1394_deinterlace_stereo_frames(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);
        dc1394_deinterlace_stereo_frames_fixed(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);

        dc1394_capture_enqueue(dcCam, dcFrame); // release the captured frame as soon as possible
        dcFrame = 0;
        if (!fs->image)
            goto _exit_;
        isColor = colorStereo;
    }
    nch = isColor ? 3 : 1;

    for (i = 0; i < nimages; i++)
    {
        IplImage fhdr;
        dc1394video_frame_t f = fs ? *fs : *dcFrame, *fc = &f;
        f.size[1] /= nimages;
        f.image += f.size[0] * f.size[1] * i; // TODO: make it more universal
        if (isColor)
        {
            if (!frameC)
                frameC = (dc1394video_frame_t*)calloc(1, sizeof(*frameC));
            frameC->color_coding = nch == 3 ? DC1394_COLOR_CODING_RGB8 : DC1394_COLOR_CODING_MONO8;
            if (nimages == 1)
            {
                dc1394_convert_frames(&f, frameC);
                dc1394_capture_enqueue(dcCam, dcFrame);
                dcFrame = 0;
            }
            else
            {
                f.color_filter = bayerFilter;
                dc1394_debayer_frames(&f, frameC, bayer);
            }
            fc = frameC;
        }
        if (!img[i])
            img[i] = cvCreateImage(cvSize(fc->size[0], fc->size[1]), 8, nch);
        cvInitImageHeader(&fhdr, cvSize(fc->size[0], fc->size[1]), 8, nch);
        cvSetData(&fhdr, fc->image, fc->size[0]*nch);

        // Swap R&B channels:
        if (nch==3)
            cvConvertImage(&fhdr,&fhdr,CV_CVTIMG_SWAP_RB);

        if( rectify && cameraId == VIDERE && nimages == 2 )
        {
            if( !maps[0][0] || maps[0][0]->width != img[i]->width || maps[0][0]->height != img[i]->height )
            {
                CvSize size = cvGetSize(img[i]);
                cvReleaseImage(&maps[0][0]);
                cvReleaseImage(&maps[0][1]);
                cvReleaseImage(&maps[1][0]);
                cvReleaseImage(&maps[1][1]);
                maps[0][0] = cvCreateImage(size, IPL_DEPTH_16S, 2);
                maps[0][1] = cvCreateImage(size, IPL_DEPTH_16S, 1);
                maps[1][0] = cvCreateImage(size, IPL_DEPTH_16S, 2);
                maps[1][1] = cvCreateImage(size, IPL_DEPTH_16S, 1);
                char buf[4*4096];
                if( getVidereCalibrationInfo( buf, (int)sizeof(buf) ) &&
                    initVidereRectifyMaps( buf, maps[0], maps[1] ))
                    ;
                else
                    rectify = false;
            }
            cvRemap(&fhdr, img[i], maps[i][0], maps[i][1]);
        }
        else
            cvCopy(&fhdr, img[i]);
    }

    code = true;

_exit_:
    if (dcFrame)
        dc1394_capture_enqueue(dcCam, dcFrame);
    if (fs)
    {
        if (fs->image)
            free(fs->image);
        free(fs);
    }

    return code;
}
Exemplo n.º 28
0
ofxCvOpticalFlowLK::~ofxCvOpticalFlowLK(void)
{
	cvReleaseImage(&vel_x);
	cvReleaseImage(&vel_y);
}
Exemplo n.º 29
0
BOOL  CFaceProcess::OnSavePic() 
{
	// TODO: Add your control notification handler code here
	KillTimer(timer);
	if (m_Video){
// 	{AfxMessageBox("Open the camera first"); return false; 	}
// 	m_GrabFrame=cvQueryFrame(m_Video);
// 	if (!m_GrabFrame)
// 	{AfxMessageBox("Failed to grab a video frame, please retry!"); return false; 	}
	//static	char countsnap='1';
	if( !cvGrabFrame( m_Video))
		return FALSE;
	m_GrabFrame = cvRetrieveFrame(m_Video );
	if( !m_GrabFrame)
		return FALSE;
	if( !m_SaveFrame)
		m_SaveFrame = cvCreateImage( cvSize(m_GrabFrame->width,m_GrabFrame->height),
		IPL_DEPTH_8U, m_GrabFrame->nChannels );
	if( m_GrabFrame->origin == IPL_ORIGIN_TL )
		cvCopy( m_GrabFrame, m_SaveFrame, 0 );
	else
			cvFlip( m_GrabFrame, m_SaveFrame, 0 );
	}//if (m_Video)
	static  int countsnap=1;
	CString   m_name1=GetImageName(path,countsnap);
	/********* append the file extension and the relative storage path *********/
	CString   headPath="ImageLab/";
			  m_name1=headPath+m_name1+".pgm";
	const char* pszStr1 = m_name1.GetBuffer(m_name1.GetLength());
	//AfxMessageBox(m_name);
	IplImage * m_snap1=cvCreateImage(cvGetSize(m_SaveFrame),m_SaveFrame->depth,m_SaveFrame->nChannels);
	        cvCopy(m_SaveFrame,m_snap1,NULL);
	        //m_snap1->origin=1;//origin == 0 saves the image upside-down
	IplImage *faceImage1=0;
	IplImage *faceGray1=0;
	//detect the face
	try{
    	if(faceDetector.detect_and_draw(m_snap1)){
	                  faceImage1=faceDetector.getFaceImage();
	                 // faceImage1->origin=1;
	                  //simplify the image
	                  if(faceSimplifier.Simplify(faceImage1)){
	                           faceGray1=faceSimplifier.getFaceImage();
                               //faceGray1->origin=1;//origin == 0 saves the image upside-down
         	                   cvSaveImage(pszStr1,faceGray1);	//write the image to a file
							   countsnap++;
					  }
		}
		SetTimer(timer,1,NULL) ;
	}catch(...)
	{
		SetTimer(timer,1,NULL) ;
        AfxMessageBox("保存图片失败--OnSavePic!!");
		return false;
	}
	if(countsnap>3)
			GetDlgItem(IDC_OK)->EnableWindow(TRUE);//enable the control
	if(m_snap1)
		cvReleaseImage(&m_snap1);
    if(faceImage1)
		cvReleaseImage(&faceImage1);
	if(faceGray1)
		cvReleaseImage(&faceGray1);
	return true;
}
Exemplo n.º 30
0
int main()
{
	// Initialize capturing live feed from the camera
	CvCapture* capture = 0;
	capture = cvCaptureFromCAM(1);	 //depending on which camera you are capturing from
	// Couldn't get a device? Throw an error and quit
	if(!capture)
    {
        printf("Could not initialize capturing...\n");
        return -1;
    }

	// The two windows we'll be using
    cvNamedWindow("video");
	cvNamedWindow("thresh");

	// This image holds the "scribble" data...
	// the tracked positions of the ball
	IplImage* imgScribble = NULL;

	// An infinite loop
	while(true)
    {
		// Will hold a frame captured from the camera
		IplImage* frame = 0;
		frame = cvQueryFrame(capture);

		// If we couldn't grab a frame... quit
        if(!frame)
            break;
		
		// If this is the first frame, we need to initialize it
		if(imgScribble == NULL)
		{
			imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
		}

		// Holds the yellow thresholded image (yellow = white, rest = black)
		IplImage* imgYellowThresh = GetThresholdedImage(frame);

		// Calculate the moments to estimate the position of the ball
		CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
		cvMoments(imgYellowThresh, moments, 1);

		// The actual moment values
		double moment10 = cvGetSpatialMoment(moments, 1, 0);
		double moment01 = cvGetSpatialMoment(moments, 0, 1);
		double area = cvGetCentralMoment(moments, 0, 0);

		// Holding the last and current ball positions
		static int posX = 0;
		static int posY = 0;

		int lastX = posX;
		int lastY = posY;

		posX = moment10/area;
		posY = moment01/area;

		// Print it out for debugging purposes
		printf("position (%d,%d)\n", posX, posY);

		// We want to draw a line only if it's a valid position
		if(lastX>0 && lastY>0 && posX>0 && posY>0)
		{
			// Draw a yellow line from the previous point to the current point
			cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
		}

		// Add the scribbling image and the frame... and we get a combination of the two
		cvAdd(frame, imgScribble, frame);
		cvShowImage("thresh", imgYellowThresh);
		cvShowImage("video", frame);

		// Wait for a keypress
		int c = cvWaitKey(10);
		if(c!=-1)
		{
			// If pressed, break out of the loop
            break;
		}

		// Release the thresholded image... we need no memory leaks.. please
		cvReleaseImage(&imgYellowThresh);

		free(moments); // allocated with malloc above, so free (not delete)
    }

	// We're done using the camera. Other applications can now use it
	cvReleaseCapture(&capture);
    return 0;
}
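GetThresholdedImage() is not included in this excerpt; a common version converts the frame to HSV and keeps a yellow hue band with cvInRangeS. A sketch under that assumption (the exact thresholds are illustrative):

// Sketch of GetThresholdedImage(): isolate yellow pixels in HSV space.
// The (20..30, 100..255, 100..255) band is an assumption, not canonical.
IplImage* GetThresholdedImage(IplImage *img)
{
	IplImage *imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
	cvCvtColor(img, imgHSV, CV_BGR2HSV);

	IplImage *imgThreshed = cvCreateImage(cvGetSize(img), 8, 1);
	cvInRangeS(imgHSV, cvScalar(20, 100, 100, 0),
	                   cvScalar(30, 255, 255, 0), imgThreshed);

	cvReleaseImage(&imgHSV);
	return imgThreshed;
}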