/*
 * Build a new Ruby moments object from +arr+.
 * is_binary != 0 treats every non-zero pixel as 1 when accumulating.
 */
VALUE new_object(CvArr *arr, int is_binary = 0) {
  VALUE moments_obj = rb_allocate(rb_class());
  cvMoments(arr, CVMOMENTS(moments_obj), is_binary);
  return moments_obj;
}
// Track the blob in 'imgThresh': compute its centroid, draw a trace
// line onto the global imgTracking, and print the position relative to
// the image centre (halfWidth/halfHeight).
void trackObject(IplImage* imgThresh){
    // Moments live on the stack: the original malloc'd and free'd a
    // CvMoments on every call for no benefit.
    CvMoments moments;
    cvMoments(imgThresh, &moments, 1);  // 1 => treat image as binary
    double moment10 = cvGetSpatialMoment(&moments, 1, 0);
    double moment01 = cvGetSpatialMoment(&moments, 0, 1);
    double area = cvGetCentralMoment(&moments, 0, 0);  // central m00 == spatial m00

    // If area <= 100 assume there is no object, only noise; this also
    // guards the centroid divisions below against area == 0.
    if(area>100){
        // Centroid of the blob = (m10/m00, m01/m00).
        int posX = moment10/area;
        int posY = moment01/area;
        if(debug>1 && lastX>=0 && lastY>=0 && posX>=0 && posY>=0)
        {
            // Draw a line from the previous point to the current point.
            // (cvScalar(0,0,255) is red in BGR order, despite the old
            // comment calling it yellow.)
            cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,0,255), 4);
        }
        lastX = posX;
        lastY = posY;
        printf("%+05d %+05d\n", posX-halfWidth, posY-halfHeight);
    }else{
        printf("***** *****\n");
    }
}
// Track the detected object in 'imgThresh': compute its centroid and
// draw a trace line (colored with the configured low HSV values) onto
// the global imgTracking image.
void trackObject(IplImage* imgThresh){
    // Compute moments of the thresholded image. Stack allocation: the
    // original malloc'd/free'd a CvMoments on every call.
    CvMoments moments;
    cvMoments(imgThresh, &moments, 1);  // 1 => binary image
    double moment10 = cvGetSpatialMoment(&moments, 1, 0);
    double moment01 = cvGetSpatialMoment(&moments, 0, 1);
    double area = cvGetCentralMoment(&moments, 0, 0);  // == m00

    // If area < 1000 assume there is no object in the image (noise);
    // this also keeps the divisions below safe from area == 0.
    if(area>1000){
        // Object position = centroid (m10/m00, m01/m00).
        int posX = moment10/area;
        int posY = moment01/area;
        if(lastX>=0 && lastY>=0 && posX>=0 && posY>=0)
        {
            // Draw a line, colored from the configured HSV lower bounds,
            // from the previous point to the current point.
            cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(lowH,lowS,lowV), 4);
        }
        lastX = posX;
        lastY = posY;
    }
}
// Threshold imgOrig in HSV space between 'lower' and 'upper', move the
// mouse cursor to the (horizontally mirrored) centroid of the matching
// region, and return the binary mask. Caller releases the result.
// Parameter 'n' is unused; kept for interface compatibility.
IplImage* threshImage(IplImage *imgOrig, CvScalar lower, CvScalar upper, int n)
{
    IplImage* imgHSV = cvCreateImage(cvGetSize(imgOrig), 8, 3); //size, depth, channels
    cvCvtColor(imgOrig, imgHSV, CV_BGR2HSV); //check!
    IplImage* imgThresh = cvCreateImage(cvGetSize(imgOrig), 8, 1);
    cvInRangeS(imgHSV, lower, upper, imgThresh);

    // Stack allocation: the original malloc'd the CvMoments and then
    // released it with 'delete' -- a malloc/delete mismatch (undefined
    // behaviour).
    CvMoments moments;
    cvMoments(imgThresh, &moments, 1);  // 1 => binary image
    double moment10 = cvGetSpatialMoment(&moments, 1, 0);
    double moment01 = cvGetSpatialMoment(&moments, 0, 1);
    double area = cvGetSpatialMoment(&moments, 0, 0);  // m00

    // Cursor position persists across calls (last known centroid).
    static int posX = 0;
    static int posY = 0;
    if (area > 0) {  // guard: an empty mask would divide by zero
        posX = moment10/area;
        posY = moment01/area;
        int curX = posX * XRATIO;
        int curY = posY * YRATIO;
        // X is mirrored across a 1366-pixel-wide screen.
        SetCursorPos(1366-curX, curY);
    }
    cvReleaseImage(&imgHSV);
    return imgThresh;
}
// Draw a filled circle at the previously-detected centroid of the
// global imgRed mask onto 'img'. Always returns 0 (NULL); callers must
// not release the result.
// NOTE(review): 'img' is only drawn on -- the moments come from the
// global imgRed. TODO confirm that is intentional.
IplImage* getMoment(IplImage* img)
{
    // Calculate position. Stack allocation: the original leaked a
    // malloc'd CvMoments and a new'd CvPoint on every call.
    CvMoments moments;
    cvMoments(imgRed, &moments, 1);  // 1 => binary image
    double moment10 = cvGetSpatialMoment(&moments, 1, 0);
    double moment01 = cvGetSpatialMoment(&moments, 0, 1);
    double area = cvGetCentralMoment(&moments, 0, 0);  // == m00

    // Previous position persists across calls.
    static int posX = 0;
    static int posY = 0;
    int lastX = posX;
    int lastY = posY;
    if (area > 0) {  // guard: empty mask would divide by zero
        posX = moment10/area;
        posY = moment01/area;
    }
    CvPoint center = cvPoint(lastX, lastY);
    printf("%d lastX", lastX);
    printf("%d lastY", lastY);
    // Only draw when the centroid actually moved since last frame.
    if(lastX != posX && lastY != posY)
    {
        cvDrawCircle(img, center, 25, CV_RGB(10,10,255), -1);
    }
    return 0;
}
/**
 * Draw a marker for the found area's estimated position.
 * (Original note kept: "NONE OF THIS IS USED AT THE MOMENT - THIS IS
 * FOR REFERENCE".)
 * @param frame  Image to draw on
 * @param thresh Binary image to compute moments from
 */
void ColorTracking::DrawPoint(IplImage *frame, IplImage *thresh)
{
    // Calculate the moments to estimate the position of the items.
    // Stack allocation: the original malloc'd the struct and released
    // it with 'delete' -- a malloc/delete mismatch (undefined
    // behaviour).
    CvMoments moments;
    cvMoments(thresh, &moments, 1);  // 1 => binary image

    // The actual moment values.
    double moment10 = cvGetSpatialMoment(&moments, 1, 0);
    double moment01 = cvGetSpatialMoment(&moments, 0, 1);
    double area = cvGetCentralMoment(&moments, 0, 0);  // == m00

    // Holding the last and current ball positions (persist across frames).
    static int posX = 0;
    static int posY = 0;
    int lastX = posX;
    int lastY = posY;
    if (area > 0) {  // empty threshold image => keep previous position
        posX = moment10/area;
        posY = moment01/area;
    }

    // We want to draw only for a valid (strictly positive) position.
    if(lastX>0 && lastY>0 && posX>0 && posY>0)
    {
        // Just add a circle to the frame when it finds the color
        //addObjectToVideo(frame, cvPoint(posX, posY));
        // find x distance from middle to object
        drawWidthDiff(frame, cvPoint(posX, posY), middle);
    }
}
void Segment::updateContour() { contour = 0; cvFindContours(iplMask, storage, &contour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE); CvMoments moments; // CvSeq* c = contour; //cerr << "-------------------------- " << c << endl; /*for( CvSeq* c = contour; c!=NULL; c=c->h_next ){ for(int i = 0; i < c->total; i++){ CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, c, i ); cerr << p->x << "," << p->y << endl; } } */ cvMoments(contour, &moments); double m00, m10, m01; m00 = cvGetSpatialMoment(&moments, 0,0); m10 = cvGetSpatialMoment(&moments, 1,0); m01 = cvGetSpatialMoment(&moments, 0,1); // TBD check that m00 != 0 float center_x = m10/m00; float center_y = m01/m00; centroid = cvPoint(center_x, center_y); }
/* Script command: compute the centroid of the currently picked OpenCV
 * sequence and export it to the script variables "centroid_x" and
 * "centroid_y". */
static COMMAND_FUNC( do_centroid )
{
	OpenCV_Seq *ocv_seq_p;
	ocv_seq_p=PICK_OCV_SEQ("sequence");
	if( ocv_seq_p == NO_OPENCV_SEQ ) return;
	if( ocv_seq_p->ocv_seq == NULL ) {
		/* The message is formatted but the warning call is disabled. */
		sprintf(ERROR_STRING, "do_centroid: sequence is NULL.\n");
		//WARN(ERROR_STRING);
		return;
	}
	CvMoments moments;
	double M00, M01, M10;
	float x,y;
	/* is_binary = 1: non-zero values are treated as ones. */
	cvMoments(ocv_seq_p->ocv_seq, &moments, 1);
	M00 = cvGetSpatialMoment(&moments, 0, 0);
	M10 = cvGetSpatialMoment(&moments, 1, 0);
	M01 = cvGetSpatialMoment(&moments, 0, 1);
	/* Centroid = (M10/M00, M01/M00). The (int) casts truncate to whole
	 * pixels before storing in floats. NOTE(review): there is no guard
	 * against M00 == 0 (empty sequence) -- confirm upstream. */
	x = (int)(M10/M00);
	y = (int)(M01/M00);
	/* char msg[30]; sprintf(msg, "M00 = %f, M10 = %f, M01 = %f", M00, M10, M01); WARN(msg); */
	char number[30];
	sprintf(number, "%f", x);
	ASSIGN_VAR("centroid_x", number);
	sprintf(number, "%f", y);
	ASSIGN_VAR("centroid_y", number);
}
// Process one new frame: threshold the input bitmap by the configured
// HSV color range and publish the normalized centroid of the matching
// pixels on out_centroid. Optionally publishes a debug visualization.
void tracker_bitmap_color::run() {
  vsx_bitmap *bmp = in_bitmap->get_addr();
  //Check if there is any new image to process
  //(a frame is "new" when it is valid and its timestamp changed).
  if(!(bmp && bmp->valid && bmp->timestamp && bmp->timestamp != m_previousTimestamp)){
#ifdef VSXU_DEBUG
    printf("Skipping frame after %d \n",m_previousTimestamp);
#endif
    return;
  }
  m_previousTimestamp = bmp->timestamp;
  initialize_buffers(bmp->size_x, bmp->size_y);

  //Grab the input image (wrap the bitmap's pixel buffer; no copy made)
  m_img[FILTER_NONE]->imageData = (char*)bmp->data;

  //1)filter the image to the HSV color space
  cvCvtColor(m_img[FILTER_NONE],m_img[FILTER_HSV],CV_RGB2HSV);

  //2)Threshold the image based on the supplied range of colors.
  //in_color1/in_color2 components are floats in 0..1, scaled to 0..255.
  cvInRangeS(
    m_img[FILTER_HSV],
    cvScalar(
      (int)(in_color1->get(0)*255),
      (int)(in_color1->get(1)*255),
      (int)(in_color1->get(2)*255)
    ),
    cvScalar(
      (int)(in_color2->get(0)*255),
      (int)(in_color2->get(1)*255),
      (int)(in_color2->get(2)*255)
    ),
    m_img[FILTER_HSV_THRESHOLD]
  );

  //3)Now the math to find the centroid of the "thresholded image"
  //3.1)Get the moments (is_binary = 1)
  cvMoments(m_img[FILTER_HSV_THRESHOLD],m_moments,1);
  double moment10 = cvGetSpatialMoment(m_moments,1,0);
  double moment01 = cvGetSpatialMoment(m_moments,0,1);
  double area = cvGetCentralMoment(m_moments,0,0);  // central m00 == spatial m00

  //3.2)Calculate the positions.
  //NOTE(review): if no pixel matched, area == 0 and these divisions
  //produce NaN/Inf -- confirm downstream consumers tolerate that.
  double posX = moment10/area;
  double posY = moment01/area;

  //3.3)Normalize the positions to 0..1 of the frame dimensions
  posX = posX/bmp->size_x;
  posY = posY/bmp->size_y;

  //Finally set the result
#ifdef VSXU_DEBUG
  printf("Position: (%f,%f)\n",posX,posY);
#endif
  out_centroid->set(posX,0);
  out_centroid->set(posY,1);

  //Calculate the debug output only if requested (one-shot flag)
  if(m_compute_debug_out){
    m_compute_debug_out = false;
    cvCvtColor(m_img[FILTER_HSV_THRESHOLD],m_img[FILTER_HSV_THRESHOLD_RGB], CV_GRAY2RGB);
    m_debug = *bmp;
    m_debug.data = m_img[FILTER_HSV_THRESHOLD_RGB]->imageData;
    out_debug->set_p(m_debug);
  }
}
// Return the centroid {x, y} of the stored contour 'c' as integer
// pixel coordinates. Returns {0, 0} for a degenerate (zero-area)
// contour.
std::vector<int> Contours::getCentroid(){
	std::vector<int> vec(2);
	// Stack allocation: no need for malloc/free here.
	CvMoments myMoments;
	cvMoments( this->c, &myMoments );
	if (myMoments.m00 != 0) {
		// Cast the *quotient*, not m10/m01 alone: the original wrote
		// "(int) myMoments->m10/myMoments->m00", which truncates m10
		// to int before dividing and loses sub-unit precision.
		vec[0] = (int)(myMoments.m10 / myMoments.m00);
		vec[1] = (int)(myMoments.m01 / myMoments.m00);
	}
	return vec;
}
// Compute the ball position as the centroid of 'contour'.
// Returns (-1, -1) when the contour is NULL or degenerate (zero area),
// so callers can treat that as "no ball found".
cv::Point2f BallIdentification::calcBallPose(CvSeq* contour){
	if (contour != NULL) {
		CvMoments moments;
		cvMoments(contour, &moments);
		// Guard m00 == 0: dividing would yield NaN coordinates; reuse
		// the function's existing (-1,-1) sentinel instead.
		if (moments.m00 != 0) {
			cv::Point2f ballPose = cv::Point2f(moments.m10/moments.m00, moments.m01/moments.m00);
			return ballPose;
		}
	}
	return cv::Point2f(-1.0, -1.0);
}
/* Regression test for cvGetHuMoments: computes the seven Hu invariants
 * of a random 512x512 8-bit image directly from the normalized central
 * moments and compares them against the library result. */
static int hu_moments_test( void )
{
    const double success_error_level = 1e-7;  /* max allowed relative error */
    CvSize size = { 512, 512 };
    int i;
    IplImage* img = atsCreateImage( size.width, size.height, 8, 1, 0 );
    CvMoments moments;
    CvHuMoments a, b;  /* a = library result, b = reference computation */
    AtsRandState rng_state;
    int seed = atsGetSeed();
    double nu20, nu02, nu11, nu30, nu21, nu12, nu03;
    double err = 0;
    char buffer[100];

    /* Fill the image with uniform random bytes in [0, 255]. */
    atsRandInit( &rng_state, 0, 255, seed );
    atsbRand8u( &rng_state, (uchar*)(img->imageData), size.width * size.height );

    cvMoments( img, &moments, 0 );  /* 0 => grayscale, not binary */
    atsReleaseImage( img );

    /* Normalized central moments up to third order. */
    nu20 = cvGetNormalizedCentralMoment( &moments, 2, 0 );
    nu11 = cvGetNormalizedCentralMoment( &moments, 1, 1 );
    nu02 = cvGetNormalizedCentralMoment( &moments, 0, 2 );
    nu30 = cvGetNormalizedCentralMoment( &moments, 3, 0 );
    nu21 = cvGetNormalizedCentralMoment( &moments, 2, 1 );
    nu12 = cvGetNormalizedCentralMoment( &moments, 1, 2 );
    nu03 = cvGetNormalizedCentralMoment( &moments, 0, 3 );

    cvGetHuMoments( &moments, &a );

    /* Reference Hu invariants computed from the standard formulas. */
    b.hu1 = nu20 + nu02;
    b.hu2 = sqr(nu20 - nu02) + 4*sqr(nu11);
    b.hu3 = sqr(nu30 - 3*nu12) + sqr(3*nu21 - nu03);
    b.hu4 = sqr(nu30 + nu12) + sqr(nu21 + nu03);
    b.hu5 = (nu30 - 3*nu12)*(nu30 + nu12)*(sqr(nu30 + nu12) - 3*sqr(nu21 + nu03)) +
            (3*nu21 - nu03)*(nu21 + nu03)*(3*sqr(nu30 + nu12) - sqr(nu21 + nu03));
    b.hu6 = (nu20 - nu02)*(sqr(nu30 + nu12) - sqr(nu21 + nu03)) +
            4*nu11*(nu30 + nu12)*(nu21 + nu03);
    b.hu7 = (3*nu21 - nu03)*(nu30 + nu12)*(sqr(nu30 + nu12) - 3*sqr(nu21 + nu03)) +
            (3*nu12 - nu30)*(nu21 + nu03)*(3*sqr(nu30 + nu12) - sqr(nu21 + nu03));

    /* Maximum relative error over the seven invariants (a and b are
     * plain structs of seven doubles, hence the pointer walk). */
    for( i = 0; i < 7; i++ )
    {
        double t = rel_err( ((double*)&b)[i], ((double*)&a)[i] );
        if( t > err ) err = t;
    }

    sprintf( buffer, "Accuracy: %.4e", err );
    return trsResult( err > success_error_level ? TRS_FAIL : TRS_OK, buffer );
}
/*
 * call-seq:
 *   CvMoments.new(<i>src[,is_binary = nil]</i>)
 *
 * Calculates all moments up to third order of a polygon or rasterized shape.
 * <i>src</i> should be CvMat or CvPolygon.
 *
 * If is_binary = true, all the zero pixel values are treated as zeroes, all the others are treated as 1's.
 */
VALUE rb_initialize(int argc, VALUE *argv, VALUE self)
{
  VALUE src, is_binary;
  /* "02": both arguments are optional. */
  rb_scan_args(argc, argv, "02", &src, &is_binary);
  /* With no source the object stays zero-initialized. */
  if (!NIL_P(src)) {
    /* Accept either a matrix (image) or a point sequence. */
    if (rb_obj_is_kind_of(src, cCvMat::rb_class()) || rb_obj_is_kind_of(src, cCvSeq::rb_class()))
      cvMoments(CVARR(src), CVMOMENTS(self), TRUE_OR_FALSE(is_binary, 0));
    else
      rb_raise(rb_eTypeError, "argument 1 (src) should be %s or %s.",
               rb_class2name(cCvMat::rb_class()), rb_class2name(cCvSeq::rb_class()));
  }
  return self;
}
// Compute the centre of mass of the binary image 'img' and store it in
// *posX / *posY. The outputs are left untouched when the image is
// empty (zero area), so callers should initialize them.
void getObjectCentre(IplImage* img, int* posX, int *posY)
{
	// Calculate the moments to estimate the position of the ball.
	// Stack allocation: the original malloc'd the CvMoments and then
	// released it with 'delete' -- a malloc/delete mismatch (undefined
	// behaviour).
	CvMoments moments;
	cvMoments(img, &moments, 1);  // 1 => binary image

	// The actual moment values.
	double moment10 = cvGetSpatialMoment(&moments, 1, 0);
	double moment01 = cvGetSpatialMoment(&moments, 0, 1);
	double area = cvGetCentralMoment(&moments, 0, 0);  // == m00

	if (area > 0) {  // guard: an empty image would divide by zero
		*posX = moment10/area;
		*posY = moment01/area;
	}
}
// Return the centre of gravity of this frame's matrix, offset by the
// frame's (x, y) origin. Returns (0, 0) for an empty matrix.
CvPoint FigureFrame::GetCenterOfGravity()
{
	// Logical '||' (the original used bitwise '|', which evaluates both
	// sides and does not short-circuit).
	if(this->Matrix.rows <= 0 || this->Matrix.cols <= 0)
		return cvPoint(0,0);
	// Stack IplImage header over the Mat data: the original new'd an
	// IplImage every call and never deleted it (memory leak).
	IplImage img = this->Matrix;
	cvMoments(&img, &this->moments, 1);  // 1 => binary image
	CvPoint pt;
	// Centroid (m10/m00, m01/m00) translated to frame coordinates.
	pt.x = (int)(this->moments.m10 / this->moments.m00) + this->x;
	pt.y = (int)(this->moments.m01 / this->moments.m00) + this->y;
	return pt;
}
/**
 * @internal
 * Extracts the relevant information (bounding box, centroid, contour
 * nodes) of the found blobs.
 * @note when this method is called, the found blobs should be stored in
 * the m_blobs member.
 */
void BlobFinder::extractBlobsInformation()
{
  // Order blobs (from bigger to smaller) -> this way the most relevant
  // are at the beginning.
  std::sort( m_blobs.begin(), m_blobs.end(), std::greater< Blob >() );

  // Discard blobs (if there is more than the max)
  // TODO

  // To store contour moments
  CvMoments moment;

  // to read contour points
  CvSeqReader contourReader;
  CvPoint contourNode;

  // Calculate information about contours (only the first m_maxBlobs).
  for( Blobs::size_type i = 0; (i < m_blobs.size()) && (i < m_maxBlobs); ++i )
  {
    // Current blob
    Blob& blob = m_blobs[ i ];

    // Get bbox
    blob.bbox = cvBoundingRect( blob.contour );

    // Get center through moments: centroid = (m10/m00, m01/m00).
    // NOTE(review): no guard against moment.m00 == 0 (degenerate
    // contour) -- confirm contours here always have non-zero area.
    cvMoments( blob.contour, &moment );
    blob.center.x = (float)(moment.m10 / moment.m00);
    blob.center.y = (float)(moment.m01 / moment.m00);

    // Invert Y coordinate because our Y 0 is at top of the image,
    // and for Opencv is at the bottom of the image
    //blob.center.Y = inImage.GetHeight() - blob.center.Y;

    // Store the contour nodes (sequential read over the CvSeq).
    cvStartReadSeq( blob.contour, &contourReader );
    for( int j = 0; j < blob.contour->total; ++j )
    {
      // Read node of the contour
      CV_READ_SEQ_ELEM( contourNode, contourReader );
      blob.nodes.push_back( Point( (float)contourNode.x, (float)contourNode.y , 0) );
    }
  }

  // Store number of actual blobs
  m_nBlobs = min( (int)m_blobs.size(), (int)m_maxBlobs );
}
// Analysis thread body: builds a binary mask (field->treat) of the
// pixels that lie inside the configured bounding box and BGR color
// range, then sets field->position to the mask's centre of mass, and
// exits the thread.
void ThreadAnalyse::run()
{
    int img_step = field->img->widthStep;
    int img_channels = field->img->nChannels;
    uchar * img_data = (uchar *) field->img->imageData;
    char key = -1;  // NOTE(review): unused in this function
    field->treat->origin = field->img->origin;
    int filtered_step = field->treat->widthStep;
    int filtered_channels = field->treat->nChannels;
    uchar * filtered_data_b = (uchar *) field->treat->imageData;
    // Per-pixel threshold: inside the box AND each BGR channel within
    // its [MIN*, MAX*] range -> 255, otherwise 0.
    for (int i = 0; i < field->img->width; i++) {
        for (int j = 0; j < field->img->height; j++) {
            if (i > field->box[XMIN] && i < field->box[XMAX]
                && j > field->box[YMIN] && j < field->box[YMAX]
                && (img_data[j * img_step + i * img_channels]) > field->color[MINB]
                && (img_data[j * img_step + i * img_channels]) < field->color[MAXB]
                && (img_data[j * img_step + i * img_channels + 1]) > field->color[MING]
                && (img_data[j * img_step + i * img_channels + 1]) < field->color[MAXG]
                && (img_data[j * img_step + i * img_channels + 2]) > field->color[MINR]
                && (img_data[j * img_step + i * img_channels + 2]) < field->color[MAXR]) {
                filtered_data_b[j * filtered_step + i * filtered_channels] = 255;
            } else {
                filtered_data_b[j * filtered_step + i * filtered_channels] = 0;
            }
        }
    }
    //cvErode(field->treat, field->treat, 0, 1);
    // 'moments' is presumably a class member pointer -- TODO confirm it
    // is allocated before this thread runs.
    cvMoments(field->treat, moments, 1);  // 1 => binary mask
    // The actual moment values B
    double moment10 = cvGetSpatialMoment(moments, 1, 0);
    double moment01 = cvGetSpatialMoment(moments, 0, 1);
    double area = cvGetCentralMoment(moments, 0, 0);
    // Centroid = (m10/m00, m01/m00).
    // NOTE(review): divides by zero when no pixel passes the filter.
    field->position->x = (int) moment10 / area;
    field->position->y = (int) moment01 / area;
    pthread_exit(NULL);
}
double BlobContour::GetMoment(int p, int q) { // is a valid moment? if (p < 0 || q < 0 || p > MAX_MOMENTS_ORDER || q > MAX_MOMENTS_ORDER) { return -1; } if (IsEmpty()) return 0; // it is calculated? if (m_moments.m00 == -1) { cvMoments(GetContourPoints(), &m_moments); } return cvGetSpatialMoment(&m_moments, p, q); }
/****************************************************************
** fit_params ***************************************************
****************************************************************/
/* Compute blob parameters from the image moments of a width x height
 * 8-bit buffer: total mass (*Mu00), centre of gravity (*x0, *y0,
 * relative to the ROI origin) and the central moments (*Uu11, *Uu20,
 * *Uu02) used for orientation. pState receives the raw CvMoments.
 * Returns CFitParamsNoError on success, CFitParamsZeroMomentError for
 * an all-zero image, or an error code propagated by the CHKCV macro. */
CFitParamsReturnType fit_params( CvMoments *pState, double *x0, double *y0, double *Mu00, double *Uu11, double *Uu20, double *Uu02, int width, int height, unsigned char *img, int img_step )
{
  double Mu10, Mu01;
  double val;
  CvMat arr;

  /* Wrap the raw pixel buffer in a CvMat header (no copy). */
  arr = fi2cv_view(img, img_step, width, height);

  /* get moments */
  int binary=0; // not a binary image?
  CHKCV( cvMoments( &arr, pState, binary ), CFitParamsOtherError);

  /* calculate center of gravity from spatial moments */
  CHKCV( val = cvGetSpatialMoment( pState, 0, 0), CFitParamsOtherError);
  *Mu00 = val;

  CHKCV( Mu10 = cvGetSpatialMoment( pState, 1, 0), CFitParamsOtherError);
  CHKCV( Mu01 = cvGetSpatialMoment( pState, 0, 1), CFitParamsOtherError);

  /* Guard against a zero total mass before dividing. */
  if( *Mu00 != 0.0 ) {
    *x0 = Mu10 / *Mu00;
    *y0 = Mu01 / *Mu00; /* relative to ROI origin */
  }
  else {
    return CFitParamsZeroMomentError;
  }

  /* calculate blob orientation from central moments */
  CHKCV( val = cvGetCentralMoment( pState, 1, 1), CFitParamsCentralMomentError);
  *Uu11 = val;
  CHKCV( val = cvGetCentralMoment( pState, 2, 0), CFitParamsCentralMomentError);
  *Uu20 = val;
  CHKCV( val = cvGetCentralMoment( pState, 0, 2), CFitParamsCentralMomentError);
  *Uu02 = val;

  return CFitParamsNoError;
}
// Find contours in 'img' (NOTE: cvFindContours modifies the input),
// draw them into a fresh single-channel image, and record the centroid
// of each sizable contour in the global centers[] array. fruitCount is
// set to the number of objects found. The caller owns (and must
// release) the returned image.
IplImage* contour(IplImage* img)
{
	// 'aux' was always NULL here, so these allocations ran on every
	// call; the dead "if(aux == NULL)" guard and the unused
	// 'static int i' / 'fileName' locals have been removed.
	IplImage* aux = cvCreateImage(cvGetSize(img), 8, 1);
	CvMemStorage* store = cvCreateMemStorage(0);
	CvSeq* contours = 0;

	cvFindContours(img, store, &contours);  // finding contours in an image
	cvZero(aux);
	//if(contours->total)
	{
		cvDrawContours(aux, contours, cvScalarAll(255), cvScalarAll(255), 100);
	}

	// Stack-allocated moments: the original malloc'd a CvMoments and
	// never freed it (leak on every call).
	CvMoments moments;
	double M00, M01, M10;
	fruitCount = 0;
	// Walk the contour list and compute each contour's centroid.
	while(contours != NULL)
	{
		if( cvContourArea(contours, CV_WHOLE_SEQ) < 5 )  // detects only sizable objects
		{
			contours = contours->h_next;
			continue;
		}
		cvMoments(contours, &moments);
		M00 = cvGetSpatialMoment(&moments, 0, 0);
		M10 = cvGetSpatialMoment(&moments, 1, 0);
		M01 = cvGetSpatialMoment(&moments, 0, 1);
		centers[fruitCount].x = (int)(M10/M00);  // global: centre coords of an object
		centers[fruitCount].y = (int)(M01/M00);
		fruitCount++;  // global: total number of objects detected
		contours = contours->h_next;
	}
	// Fully release the storage: the original only cvClearMemStorage'd
	// it, leaking the storage itself on every call.
	cvReleaseMemStorage(&store);
	return aux;
}
std::vector<CvPoint> CaptureManager::GetTrajectory(int c) { std::vector<CvPoint> traj(frameCount); for (int i=0; i<frameCount; i++) { if (Access(i,0,false, true)->contourArray.size() <= c) traj[i]=cvPoint(-1,-1); else { CvMoments m; double m00,m10,m01; cvMoments(Access(i,0,false, true)->contourArray[c], &m); m00 = cvGetSpatialMoment(&m,0,0); m01 = cvGetSpatialMoment(&m,0,1); m10 = cvGetSpatialMoment(&m,1,0); traj[i] = cvPoint(cvRound(m.m10/m.m00), cvRound(m.m01/m.m00)); } } return traj; }
// Track one player's marker in 'imgThresh' and update that player's
// global position (posX1/posY1 or posX2/posY2) together with the
// last-position globals.
void trackObject(IplImage* imgThresh, int player){
	// Calculate the moments of 'imgThresh'. Stack allocation: the
	// original malloc'd a CvMoments and freed it only inside the
	// if(area>1000) branch, leaking it whenever the area was small.
	CvMoments moments;
	cvMoments(imgThresh, &moments, 1);  // 1 => binary image
	double moment10 = cvGetSpatialMoment(&moments, 1, 0);
	double moment01 = cvGetSpatialMoment(&moments, 0, 1);
	double area = cvGetCentralMoment(&moments, 0, 0);  // == m00

	// If area <= 1000 assume there is no object, only noise; this also
	// guards the divisions below against area == 0.
	if(area>1000){
		// Centroid of the thresholded blob, per player.
		if (player ==1){
			posX1 = moment10/area;
			posY1 = moment01/area;
		}
		if (player ==2){
			posX2 = moment10/area;
			posY2 = moment01/area;
		}
		lastX1 = posX1;
		lastY1 = posY1;
		lastX2 = posX2;
		lastY2 = posY2;
	}
}
/*
 * Locate the tracked object in the thresholded image, print its
 * position and draw its motion trace into the global imgTracking.
 * Areas of 1000 or less are treated as noise and ignored.
 */
void track_object( IplImage *imgThresh )
{
	// compute the "moments" in imgThresh
	CvMoments *m = (CvMoments *) malloc( sizeof(*m) );
	if( m == NULL ){  // XXX why the hack "malloc" in cpp??!!!
		QMessageBox::critical(0, QString( "ERROR" ), QString( "Allocation failed." ));
		return;
	}
	cvMoments( imgThresh, m, 1 );
	double area     = cvGetCentralMoment( m, 0, 0 );
	double moment10 = cvGetSpatialMoment( m, 1, 0 );
	double moment01 = cvGetSpatialMoment( m, 0, 1 );
	free( m );

	/* if the area is <1000, area is considered noise and ignored */
	if( area <= 1000 ){
		printf("area too small\n");
		return;
	}

	// compute position (centroid = m10/m00, m01/m00)
	int xpos = moment10 / area;
	int ypos = moment01 / area;

	// draw trace from the previous valid position to the current one
	if( xlast >= 0 && ylast >= 0 && xpos >= 0 && ypos >= 0 ){
		printf("x:y - %d:%d\n", xpos, ypos);
		cvLine( imgTracking, cvPoint( xpos, ypos ), cvPoint( xlast, ylast ), cvScalar( 0, 0, 255), 5 );
	}

	xlast = xpos;
	ylast = ypos;
}
// -------------------------------------------------------------------------- void BlobModel::FindBlobs(IplImage* frame, IplImage* mask, double confidence) { if (!m_initialized) return; // clear the blob array blob.RemoveAll(); // detect contours and populate array of blobs CvSeq* contour = 0; cvFindContours(mask, OpenCVmemory, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); int nPix = frame->width*frame->height; for(CvSeq* cPtr = contour ; cPtr != 0; cPtr = cPtr->h_next) { // discard noise contours CvMoments moments; cvMoments(cPtr , &moments); double area = cvGetCentralMoment(&moments, 0, 0); // area if (area > doc->blobmodel.m_minArea*nPix && area < doc->blobmodel.m_maxArea*nPix) { Blob *bl = new Blob(cPtr, doc); bl->m_confidence = confidence; blob.Add(bl); } } }
// Air-hockey robot demo: loads two consecutive camera frames, extracts
// the puck and mallet by color, predicts the puck's trajectory toward
// the defense line with a linear fit, and drives the mallet motor via
// GPIO PWM toward the predicted intercept point.
int main(int argc, char* argv[])
{
    int counter;             // NOTE(review): unused
    wchar_t auxstr[20];      // NOTE(review): unused
    printf("start!\n");
    printf("PUCK MINH: %d\n",minH);
    printf("PUCK MAXH: %d\n",maxH);
    printf("PUCK MINS: %d\n",minS);
    printf("PUCK MAXS: %d\n",maxS);
    printf("PUCK MINV: %d\n",minV);
    printf("PUCK MAXV: %d\n",maxV);
    printf("ROBOT MINH: %d\n",RminH);
    printf("ROBOT MAXH: %d\n",RmaxH);
    printf("ROBOT MINS: %d\n",RminS);
    printf("ROBOT MAXS: %d\n",RmaxS);
    printf("ROBOT MINV: %d\n",RminV);
    printf("ROBOT MAXV: %d\n",RmaxV);
    printf("FPS: %d\n",fps);

    //pwm initialize
    if(gpioInitialise() < 0) return 1;

    // Input image file names
    char imgfile[] = "camera/photodir/capmallet1.png";
    char imgfile2[] = "camera/photodir/capmallet2.png";

    // Load the two frames
    img = cvLoadImage(imgfile, CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH);
    img2 = cvLoadImage(imgfile2, CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH);

    // Create the display windows
    cvNamedWindow("circle_sample", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("circle_sample2", CV_WINDOW_AUTOSIZE);
    //cvNamedWindow("cv_ColorExtraction");

    // Init font
    cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);

    IplImage* dst_img_mallett = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img_pack = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img2_mallett = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);

    // White extraction: 0,255,0,15,240,255
    // Black extraction: 0, 255, 0, 50, 0, 100
    // Blue detection:   0, 255, 50, 200, 100, 180
    //cv_ColorExtraction(img, dst_img_mallett, CV_BGR2HSV, 0, 255, 50, 200, 100, 180);
    cv_ColorExtraction(img, dst_img_pack, CV_BGR2HSV, 0, 255, 0, 50, 0, 100);
    cv_ColorExtraction(img2, dst_img2_mallett, CV_BGR2HSV, 0, 255, 50, 200, 100, 180);
    cv_ColorExtraction(img2, dst_img2_pack, CV_BGR2HSV, 0, 255, 0, 50, 0, 100);

    //CvMoments moment_mallett;
    CvMoments moment_pack;
    CvMoments moment2_mallett;
    CvMoments moment2_pack;

    // Select channel 1 as the channel of interest for the moments.
    //cvSetImageCOI(dst_img_mallett, 1);
    cvSetImageCOI(dst_img_pack, 1);
    cvSetImageCOI(dst_img2_mallett, 1);
    cvSetImageCOI(dst_img2_pack, 1);

    //cvMoments(dst_img_mallett, &moment_mallett, 0);
    cvMoments(dst_img_pack, &moment_pack, 0);
    cvMoments(dst_img2_mallett, &moment2_mallett, 0);
    cvMoments(dst_img2_pack, &moment2_pack, 0);

    // Coordinate computation: puck centroid in each frame
    // (gX = m10/m00, gY = m01/m00).
    double m00_before = cvGetSpatialMoment(&moment_pack, 0, 0);
    double m10_before = cvGetSpatialMoment(&moment_pack, 1, 0);
    double m01_before = cvGetSpatialMoment(&moment_pack, 0, 1);
    double m00_after = cvGetSpatialMoment(&moment2_pack, 0, 0);
    double m10_after = cvGetSpatialMoment(&moment2_pack, 1, 0);
    double m01_after = cvGetSpatialMoment(&moment2_pack, 0, 1);
    double gX_before = m10_before/m00_before;
    double gY_before = m01_before/m00_before;
    double gX_after = m10_after/m00_after;
    double gY_after = m01_after/m00_after;

    // Mallet centroid in the second frame.
    double m00_mallett = cvGetSpatialMoment(&moment2_mallett, 0, 0);
    double m10_mallett = cvGetSpatialMoment(&moment2_mallett, 1, 0);
    double m01_mallett = cvGetSpatialMoment(&moment2_mallett, 0, 1);
    double gX_now_mallett = m10_mallett/m00_mallett;
    double gY_now_mallett = m01_mallett/m00_mallett;

    cvCircle(img2, cvPoint((int)gX_before, (int)gY_before), 50, CV_RGB(0,0,255), 6, 8, 0);
    cvLine(img2, cvPoint((int)gX_before, (int)gY_before), cvPoint((int)gX_after, (int)gY_after), cvScalar(0,255,0), 2);

    // Fixed Y distance: the defense line the mallet moves along.
    int target_destanceY = 480 - 30;
    // The puck travels in a straight line, so fit a linear function
    // through the two centroids to predict its later trajectory.
    double a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
    double b_intercept = gY_after - a_inclination * gX_after;
    printf("gX_after: %f\n",gX_after);
    printf("gY_after: %f\n",gY_after);
    printf("gX_before: %f\n",gX_before);
    printf("gY_before: %f\n",gY_before);
    printf("a_inclination: %f\n",a_inclination);
    printf("b_intercept: %f\n",b_intercept);
    // X coordinate where the predicted line crosses the defense line.
    int target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
    printf("target_coordinateX: %d\n",target_coordinateX);
    cvLine(img2, cvPoint((int)gX_after, (int)gY_after), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
    cvLine(img2, cvPoint(640, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
    cvLine(img2, cvPoint((int)gX_now_mallett, (int)gY_now_mallett), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
    cvPutText (img2, to_c_char((int)gX_now_mallett), cvPoint(460,30), &font, cvScalar(220,50,50));
    cvPutText (img2, to_c_char((int)target_coordinateX), cvPoint(560,30), &font, cvScalar(50,220,220));

    int amount_movement = gX_now_mallett - target_coordinateX;
    // Maximum distance the mallet can move during one two-image
    // comparison. CAM_PIX_WIDTH: 640, comparison time: 0.27*2,
    // time to travel to the edge: 1s.
    int max_amount_movement = CAM_PIX_WIDTH * 0.54 / 1;
    int target_direction;
    if(amount_movement > 0){
        if(max_amount_movement < amount_movement){
            amount_movement = max_amount_movement;
        }
        target_direction = 0;  // clockwise
    }
    else if(amount_movement < 0){
        amount_movement = -amount_movement;  // make it positive
        if(max_amount_movement < amount_movement){
            amount_movement = max_amount_movement;
        }
        target_direction = 1;  // counter-clockwise
    }

    //pwm output
    double set_time_millis= 270 * amount_movement / max_amount_movement;  // 0.27ms*(0~1)
    gpioSetMode(18, PI_OUTPUT);
    gpioSetMode(19, PI_OUTPUT);
    gpioPWM(18, 128);
    gpioWrite(19, target_direction);
    int closest_frequency = gpioSetPWMfrequency(18, 2000);
    printf("setting_frequency: %d\n", closest_frequency);
    gpioSetTimerFunc(0, (int)set_time_millis, pwmReset);

    // Show the images in the windows created above
    cvShowImage("circle_sample", img);
    cvShowImage("circle_sample2", img2);
    while(1) {
        if(cv::waitKey(30) >= 0) {
            break;
        }
    }
    gpioTerminate();

    //Clean up used images
    cvReleaseImage(&img);
    // cvReleaseImage (&dst_img);
    cvDestroyAllWindows() ;
    return 0;
}
//--------------------------------------------------------------------------------
// Find up to nConsidered contours in 'input' whose area lies in
// (minArea, maxArea), sorted from biggest to smallest, and fill the
// 'blobs' array with their bounding boxes, centroids, angles and
// contour points. Returns the number of blobs found.
int ContourFinder::findContours( ofxCvGrayscaleImage&  input,
                                 int minArea,
                                 int maxArea,
                                 int nConsidered,
                                 bool bFindHoles,
                                 bool bUseApproximation) {
	reset();

	// opencv will clober the image it detects contours on, so we want to
	// copy it into a copy before we detect contours. That copy is allocated
	// if necessary (necessary = (a) not allocated or (b) wrong size)
	// so be careful if you pass in different sized images to "findContours"
	// there is a performance penalty, but we think there is not a memory leak
	// to worry about better to create mutiple contour finders for different
	// sizes, ie, if you are finding contours in a 640x480 image but also a
	// 320x240 image better to make two ContourFinder objects then to use
	// one, because you will get penalized less.
	if( inputCopy.width == 0 ) {
		inputCopy.allocate( input.width, input.height );
		inputCopy = input;
	} else {
		if( inputCopy.width == input.width && inputCopy.height == input.height ) {
			inputCopy = input;
		} else {
			// we are allocated, but to the wrong size --
			// been checked for memory leaks, but a warning:
			// be careful if you call this function with alot of different
			// sized "input" images!, it does allocation every time
			// a new size is passed in....
			//inputCopy.clear();
			inputCopy.allocate( input.width, input.height );
			inputCopy = input;
		}
	}

	CvSeq* contour_list = NULL;
	contour_storage = cvCreateMemStorage( 1000 );
	storage	= cvCreateMemStorage( 1000 );

	// CV_RETR_LIST also returns holes; CV_RETR_EXTERNAL outer contours only.
	CvContourRetrievalMode  retrieve_mode = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
	teste = inputCopy.getCvImage();
	cvFindContours( teste, contour_storage, &contour_list,
	                sizeof(CvContour), retrieve_mode,
	                bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
	CvSeq* contour_ptr = contour_list;

	nCvSeqsFound = 0;

	// put the contours from the linked list, into an array for sorting
	while( (contour_ptr != NULL) ) {
		float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
		if( (area > minArea) && (area < maxArea) ) {
			if (nCvSeqsFound < TOUCH_MAX_CONTOUR_LENGTH){
				cvSeqBlobs[nCvSeqsFound] = contour_ptr;	 // copy the pointer
				nCvSeqsFound++;
			}
		}
		contour_ptr = contour_ptr->h_next;
	}

	// sort the pointers based on size
	if( nCvSeqsFound > 0 ) {
		qsort( cvSeqBlobs, nCvSeqsFound, sizeof(CvSeq*), qsort_carea_compare);
	}

	// now, we have nCvSeqsFound contours, sorted by size in the array
	// cvSeqBlobs let's get the data out and into our structures that we like
	for( int i = 0; i < MIN(nConsidered, nCvSeqsFound); i++ ) {
		blobs.push_back( Blob() );
		float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ );

		// moments feed the centroid computation further down
		cvMoments( cvSeqBlobs[i], myMoments );

		// this is if using non-angle bounding box
		CvRect rect	= cvBoundingRect( cvSeqBlobs[i], 0 );
		blobs[i].boundingRect.x      = rect.x;
		blobs[i].boundingRect.y      = rect.y;
		blobs[i].boundingRect.width  = rect.width;
		blobs[i].boundingRect.height = rect.height;

		cvCamShift(teste, rect, cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ), &track_comp, &track_box);

		// this is for using angle bounding box
		CvBox2D32f box;
		box = cvMinAreaRect2( cvSeqBlobs[i] );

		blobs[i].angleBoundingRect.x      = box.center.x;
		blobs[i].angleBoundingRect.y      = box.center.y;
		blobs[i].angleBoundingRect.width  = box.size.height;
		blobs[i].angleBoundingRect.height = box.size.width;
		blobs[i].angle = box.angle;

		// assign other parameters
		// (a negative signed area marks a hole)
		blobs[i].area   = fabs(area);
		blobs[i].hole   = area < 0 ? true : false;
		blobs[i].length = cvArcLength(cvSeqBlobs[i]);
		// centroid = (m10/m00, m01/m00)
		blobs[i].centroid.x = (int) (myMoments->m10 / myMoments->m00);
		blobs[i].centroid.y = (int) (myMoments->m01 / myMoments->m00);
		blobs[i].lastCentroid.x = (int) 0;
		blobs[i].lastCentroid.y = (int) 0;

		// get the points for the blob:
		CvPoint     pt;
		CvSeqReader reader;
		cvStartReadSeq( cvSeqBlobs[i], &reader, 0 );
		for( int j=0; j < min(TOUCH_MAX_CONTOUR_LENGTH, cvSeqBlobs[i]->total); j++ ) {
			CV_READ_SEQ_ELEM( pt, reader );
			blobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
		}
		blobs[i].nPts = blobs[i].pts.size();
	}
	nBlobs = blobs.size();

	// Free the storage memory.
	// Warning: do this inside this function otherwise a strange memory leak
	if( contour_storage != NULL ) {
		cvReleaseMemStorage(&contour_storage);
	}
	if( storage != NULL ) {
		cvReleaseMemStorage(&storage);
	}
	return nBlobs;
}
/* cvFindBlobsByCCClasters:
 * Extract blobs from a foreground mask by gluing nearby contours into clusters.
 * pFG     - 8-bit foreground mask (thresholded at 128 internally).
 * pBlobs  - output sequence; one blob is appended per contour cluster.
 * storage - caller-owned CvMemStorage used for all temporary sequences.
 * For each cluster the bounding rects of its contours are united, then image
 * moments of that sub-rect of pFG give the blob center (X,Y) and a 4*sigma
 * size estimate in each axis.
 */
void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
{   /* Create contours: */
    IplImage*       pIB = NULL;
    CvSeq*          cnt = NULL;
    CvSeq*          cnt_list = cvCreateSeq(0,sizeof(CvSeq),sizeof(CvSeq*), storage );
    CvSeq*          clasters = NULL;
    int             claster_cur, claster_num;

    /* Binarize a private copy of the mask and find external contours on it: */
    pIB = cvCloneImage(pFG);
    cvThreshold(pIB,pIB,128,255,CV_THRESH_BINARY);
    cvFindContours(pIB,storage, &cnt, sizeof(CvContour), CV_RETR_EXTERNAL);
    cvReleaseImage(&pIB);

    /* Create cnt_list. */
    /* Process each contour: copy contour pointers into a flat list so they
     * can be partitioned into clusters below. */
    for(; cnt; cnt=cnt->h_next)
    {
        cvSeqPush( cnt_list, &cnt);
    }

    /* CompareContour decides which contours belong to the same cluster. */
    claster_num = cvSeqPartition( cnt_list, storage, &clasters, CompareContour, NULL );

    for(claster_cur=0; claster_cur<claster_num; ++claster_cur)
    {
        int         cnt_cur;
        CvBlob      NewBlob;
        double      M00,X,Y,XX,YY; /* image moments */
        CvMoments   m;
        CvRect      rect_res = cvRect(-1,-1,-1,-1); /* height<0 marks "not yet set" */
        CvMat       mat;

        /* Unite the bounding rects of every contour assigned to this cluster: */
        for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur)
        {
            CvRect  rect;
            CvSeq*  cnt;                 /* shadows the outer cnt on purpose */
            int k = *(int*)cvGetSeqElem( clasters, cnt_cur );
            if(k!=claster_cur) continue; /* contour belongs to another cluster */
            cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
            rect = ((CvContour*)cnt)->rect;

            if(rect_res.height<0)
            {
                rect_res = rect;
            }
            else
            {   /* Unite rects: */
                int x0,x1,y0,y1;
                x0 = MIN(rect_res.x,rect.x);
                y0 = MIN(rect_res.y,rect.y);
                x1 = MAX(rect_res.x+rect_res.width,rect.x+rect.width);
                y1 = MAX(rect_res.y+rect_res.height,rect.y+rect.height);
                rect_res.x = x0;
                rect_res.y = y0;
                rect_res.width = x1-x0;
                rect_res.height = y1-y0;
            }
        }

        if(rect_res.height < 1 || rect_res.width < 1)
        {   /* Degenerate cluster rect: emit a zero-sized blob at the rect origin. */
            X = 0;
            Y = 0;
            XX = 0;
            YY = 0;
        }
        else
        {   /* Centroid from first-order moments, spread from central
             * second-order moments, all over the mask sub-rect: */
            cvMoments( cvGetSubRect(pFG,&mat,rect_res), &m, 0 );
            M00 = cvGetSpatialMoment( &m, 0, 0 );
            if(M00 <= 0 ) continue; /* empty sub-rect: skip this cluster entirely */
            X = cvGetSpatialMoment( &m, 1, 0 )/M00;
            Y = cvGetSpatialMoment( &m, 0, 1 )/M00;
            XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
            YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;
        }

        /* Blob size = 4*sigma per axis (roughly the 95% extent). */
        NewBlob = cvBlob(rect_res.x+(float)X,rect_res.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY)));
        pBlobs->AddBlob(&NewBlob);

    }   /* Next cluster. */

    #if 0
    {   /* Debug info: draw each cluster's contours and blob ellipse. */
        IplImage* pI = cvCreateImage(cvSize(pFG->width,pFG->height),IPL_DEPTH_8U,3);
        cvZero(pI);
        for(claster_cur=0; claster_cur<claster_num; ++claster_cur)
        {
            int         cnt_cur;
            CvScalar    color = CV_RGB(rand()%256,rand()%256,rand()%256);

            for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur)
            {
                CvSeq*  cnt;
                int k = *(int*)cvGetSeqElem( clasters, cnt_cur );
                if(k!=claster_cur) continue;
                cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
                cvDrawContours( pI, cnt, color, color, 0, 1, 8);
            }

            CvBlob* pB = pBlobs->GetBlob(claster_cur);
            int x = cvRound(CV_BLOB_RX(pB)), y = cvRound(CV_BLOB_RY(pB));
            cvEllipse( pI,
                cvPointFrom32f(CV_BLOB_CENTER(pB)),
                cvSize(MAX(1,x), MAX(1,y)),
                0, 0, 360,
                color, 1 );
        }

        cvNamedWindow( "Clusters", 0);
        cvShowImage( "Clusters",pI );
        cvReleaseImage(&pI);
    }   /* Debug info. */
    #endif

}   /* cvFindBlobsByCCClasters */
/* cvDetectNewBlobs:
 * Detect a newly-appeared blob over the last SEQ_SIZE frames.
 * Returns 1 (and appends the blob to pNewBlobList) if a blob candidate has a
 * consistent, near-linear trajectory across the frame history; otherwise 0.
 * pFGMask      - foreground mask for the current frame.
 * pNewBlobList - output list for the single best new blob, if any.
 * pOldBlobList - blobs already being tracked; candidates intersecting these
 *                are rejected.
 * Keeps per-frame candidate lists in m_pBlobLists[0..SEQ_SIZE-1] (index 0 is
 * the newest frame) and candidate trajectories in m_TrackSeq[0..m_TrackNum-1].
 */
int CvBlobDetectorCC::DetectNewBlob(IplImage* /*pImg*/, IplImage* pFGMask, CvBlobSeq* pNewBlobList, CvBlobSeq* pOldBlobList)
{
    int         result = 0;
    CvSize      S = cvSize(pFGMask->width,pFGMask->height);

    /* Shift blob list: drop the oldest frame's list, make room at index 0. */
    {
        int     i;
        if(m_pBlobLists[SEQ_SIZE-1]) delete m_pBlobLists[SEQ_SIZE-1];
        for(i=SEQ_SIZE-1; i>0; --i)  m_pBlobLists[i] = m_pBlobLists[i-1];
        m_pBlobLists[0] = new CvBlobSeq;
    }   /* Shift blob list. */

    /* Create contours and add new blobs to blob list: */
    {   /* Create blobs: */
        CvBlobSeq       Blobs;
        CvMemStorage*   storage = cvCreateMemStorage();

        if(m_Clastering)
        {   /* Glue contours: one blob per contour cluster. */
            cvFindBlobsByCCClasters(pFGMask, &Blobs, storage );
        }   /* Glue contours. */
        else
        { /**/
            IplImage*       pIB = cvCloneImage(pFGMask);
            CvSeq*          cnts = NULL;
            CvSeq*          cnt = NULL;
            cvThreshold(pIB,pIB,128,255,CV_THRESH_BINARY);
            cvFindContours(pIB,storage, &cnts, sizeof(CvContour), CV_RETR_EXTERNAL);

            /* Process each contour: one blob per external contour. */
            for(cnt = cnts; cnt; cnt=cnt->h_next)
            {
                CvBlob  NewBlob;

                /* Image moments: */
                double      M00,X,Y,XX,YY;
                CvMoments   m;
                CvRect      r = ((CvContour*)cnt)->rect;
                CvMat       mat;

                /* Reject contours smaller than the configured fraction of frame size. */
                if(r.height < S.height*m_HMin || r.width < S.width*m_WMin) continue;

                cvMoments( cvGetSubRect(pFGMask,&mat,r), &m, 0 );
                M00 = cvGetSpatialMoment( &m, 0, 0 );

                if(M00 <= 0 ) continue; /* empty region: skip */

                X = cvGetSpatialMoment( &m, 1, 0 )/M00;
                Y = cvGetSpatialMoment( &m, 0, 1 )/M00;

                XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
                YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;

                /* Blob size = 4*sigma per axis. */
                NewBlob = cvBlob(r.x+(float)X,r.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY)));

                Blobs.AddBlob(&NewBlob);

            }   /* Next contour. */

            cvReleaseImage(&pIB);

        }   /* One contour - one blob. */

        {   /* Delete small and intersected blobs: */
            int i;
            for(i=Blobs.GetBlobNum(); i>0; i--)
            {
                CvBlob* pB = Blobs.GetBlob(i-1);

                if(pB->h < S.height*m_HMin || pB->w < S.width*m_WMin)
                {
                    Blobs.DelBlob(i-1);
                    continue;
                }

                if(pOldBlobList)
                {
                    int     j;
                    for(j=pOldBlobList->GetBlobNum(); j>0; j--)
                    {
                        CvBlob* pBOld = pOldBlobList->GetBlob(j-1);
                        /* Axis-aligned overlap test against half-extents. */
                        if((fabs(pBOld->x-pB->x) < (CV_BLOB_RX(pBOld)+CV_BLOB_RX(pB))) &&
                           (fabs(pBOld->y-pB->y) < (CV_BLOB_RY(pBOld)+CV_BLOB_RY(pB))))
                        {   /* Intersection detected, delete blob from list: */
                            Blobs.DelBlob(i-1);
                            break;
                        }
                    }   /* Check next old blob. */
                }   /* if pOldBlobList. */
            }   /* Check next blob. */
        }   /* Delete small and intersected blobs. */

        {   /* Bubble-sort blobs by size (descending area, insertion-style): */
            int N = Blobs.GetBlobNum();
            int i,j;
            for(i=1; i<N; ++i)
            {
                for(j=i; j>0; --j)
                {
                    CvBlob  temp;
                    float   AreaP, AreaN;
                    CvBlob* pP = Blobs.GetBlob(j-1);
                    CvBlob* pN = Blobs.GetBlob(j);
                    AreaP = CV_BLOB_WX(pP)*CV_BLOB_WY(pP);
                    AreaN = CV_BLOB_WX(pN)*CV_BLOB_WY(pN);
                    if(AreaN < AreaP)break;
                    /* Swap blob records in place. */
                    temp = pN[0];
                    pN[0] = pP[0];
                    pP[0] = temp;
                }
            }

            /* Copy only first 10 blobs: cap the per-frame candidate count. */
            for(i=0; i<MIN(N,10); ++i)
            {
                m_pBlobLists[0]->AddBlob(Blobs.GetBlob(i));
            }
        }   /* Sort blobs by size. */

        cvReleaseMemStorage(&storage);
    }   /* Create blobs. */

    {   /* Shift each track: age the per-track frame slots by one. */
        int j;
        for(j=0; j<m_TrackNum; ++j)
        {
            int     i;
            DefSeq* pTrack = m_TrackSeq+j;
            for(i=SEQ_SIZE-1; i>0; --i)
                pTrack->pBlobs[i] = pTrack->pBlobs[i-1];
            pTrack->pBlobs[0] = NULL; /* slot 0 awaits this frame's assignment */
            if(pTrack->size == SEQ_SIZE)pTrack->size--;
        }
    }   /* Shift each track. */

    /* Analyze blob list to find best blob trajectory: */
    {
        double      BestError = -1;
        int         BestTrack = -1;;
        CvBlobSeq*  pNewBlobs = m_pBlobLists[0];
        int         i;
        int         NewTrackNum = 0;

        /* Try to extend existing tracks with this frame's candidates.
         * NOTE(review): pBNew pointers point into m_pBlobLists[0] storage —
         * this assumes CvBlobSeq keeps blob addresses stable while the list
         * lives; confirm against CvBlobSeq's implementation. */
        for(i=pNewBlobs->GetBlobNum(); i>0; --i)
        {
            CvBlob* pBNew = pNewBlobs->GetBlob(i-1);
            int     j;
            int     AsignedTrack = 0;
            for(j=0; j<m_TrackNum; ++j)
            {
                double  dx,dy;
                DefSeq* pTrack = m_TrackSeq+j;
                /* Slot 0 was just cleared by the shift above, so index 1 holds
                 * the track's most recent blob. */
                CvBlob* pLastBlob = pTrack->size>0?pTrack->pBlobs[1]:NULL;
                if(pLastBlob == NULL) continue;
                dx = fabs(CV_BLOB_X(pLastBlob)-CV_BLOB_X(pBNew));
                dy = fabs(CV_BLOB_Y(pLastBlob)-CV_BLOB_Y(pBNew));
                /* Gate: candidate must be within 2 blob-widths of the track head. */
                if(dx > 2*CV_BLOB_WX(pLastBlob) || dy > 2*CV_BLOB_WY(pLastBlob)) continue;
                AsignedTrack++;

                if(pTrack->pBlobs[0]==NULL)
                {   /* Fill existed track: */
                    pTrack->pBlobs[0] = pBNew;
                    pTrack->size++;
                }
                else if((m_TrackNum+NewTrackNum)<SEQ_NUM)
                {   /* Duplicate existed track: fork when several candidates match. */
                    m_TrackSeq[m_TrackNum+NewTrackNum] = pTrack[0];
                    m_TrackSeq[m_TrackNum+NewTrackNum].pBlobs[0] = pBNew;
                    NewTrackNum++;
                }
            }   /* Next track. */

            if(AsignedTrack==0 && (m_TrackNum+NewTrackNum)<SEQ_NUM )
            {   /* Initialize new track: */
                m_TrackSeq[m_TrackNum+NewTrackNum].size = 1;
                m_TrackSeq[m_TrackNum+NewTrackNum].pBlobs[0] = pBNew;
                NewTrackNum++;
            }
        }   /* Next new blob. */

        m_TrackNum += NewTrackNum;

        /* Check each track: score full-length tracks, reject bad ones. */
        for(i=0; i<m_TrackNum; ++i)
        {
            int     Good = 1;
            DefSeq* pTrack = m_TrackSeq+i;
            CvBlob* pBNew = pTrack->pBlobs[0];
            if(pTrack->size != SEQ_SIZE) continue; /* need a full history */
            if(pBNew == NULL ) continue;           /* no candidate this frame */

            /* Check intersection last blob with existed: */
            if(Good && pOldBlobList)
            {
                int k;
                for(k=pOldBlobList->GetBlobNum(); k>0; --k)
                {
                    CvBlob* pBOld = pOldBlobList->GetBlob(k-1);
                    if((fabs(pBOld->x-pBNew->x) < (CV_BLOB_RX(pBOld)+CV_BLOB_RX(pBNew))) &&
                       (fabs(pBOld->y-pBNew->y) < (CV_BLOB_RY(pBOld)+CV_BLOB_RY(pBNew))))
                        Good = 0;
                }
            }   /* Check intersection last blob with existed. */

            /* Check distance to image border: */
            if(Good)
            {   /* Check distance to image border: */
                float dx = MIN(pBNew->x,S.width-pBNew->x)/CV_BLOB_RX(pBNew);
                float dy = MIN(pBNew->y,S.height-pBNew->y)/CV_BLOB_RY(pBNew);
                if(dx < m_MinDistToBorder || dy < m_MinDistToBorder) Good = 0;
            }   /* Check distance to image border. */

            /* Check uniform motion: */
            if(Good)
            {   /* Check uniform motion: least-squares line fit x(t)=a*t+b per axis,
                 * using closed-form sums over t=0..N-1; reject tracks with high
                 * residual or implausibly fast motion. */
                double      Error = 0;
                int         N = pTrack->size;
                CvBlob**    pBL = pTrack->pBlobs;
                float       sum[2] = {0,0};
                float       jsum[2] = {0,0};
                float       a[2],b[2]; /* estimated parameters of moving x(t) = a*t+b*/
                int         j;

                for(j=0; j<N; ++j)
                {
                    float   x = pBL[j]->x;
                    float   y = pBL[j]->y;
                    sum[0] += x;
                    jsum[0] += j*x;
                    sum[1] += y;
                    jsum[1] += j*y;
                }

                /* Closed-form least-squares slope/intercept for t = 0..N-1. */
                a[0] = 6*((1-N)*sum[0]+2*jsum[0])/(N*(N*N-1));
                b[0] = -2*((1-2*N)*sum[0]+3*jsum[0])/(N*(N+1));
                a[1] = 6*((1-N)*sum[1]+2*jsum[1])/(N*(N*N-1));
                b[1] = -2*((1-2*N)*sum[1]+3*jsum[1])/(N*(N+1));

                for(j=0; j<N; ++j)
                {
                    Error +=
                        pow(a[0]*j+b[0]-pBL[j]->x,2)+
                        pow(a[1]*j+b[1]-pBL[j]->y,2);
                }

                Error = sqrt(Error/N); /* RMS residual of the linear fit */

                if( Error > S.width*0.01 ||
                    fabs(a[0])>S.width*0.1 ||
                    fabs(a[1])>S.height*0.1)
                    Good = 0;

                /* New best trajectory: */
                if(Good && (BestError == -1 || BestError > Error))
                {   /* New best trajectory: */
                    BestTrack = i;
                    BestError = Error;
                }   /* New best trajectory. */
            }   /* Check uniform motion. */
        }   /* Next track. */

        #if 0
        {   /* Debug: dump candidate counts per frame list. */
            printf("BlobDetector configurations = %d [",m_TrackNum);
            int i;
            for(i=0; i<SEQ_SIZE; ++i)
            {
                printf("%d,",m_pBlobLists[i]?m_pBlobLists[i]->GetBlobNum():0);
            }
            printf("]\n");
        }
        #endif

        if(BestTrack >= 0)
        {   /* Put new blob to output and delete from blob list: */
            assert(m_TrackSeq[BestTrack].size == SEQ_SIZE);
            assert(m_TrackSeq[BestTrack].pBlobs[0]);
            pNewBlobList->AddBlob(m_TrackSeq[BestTrack].pBlobs[0]);
            m_TrackSeq[BestTrack].pBlobs[0] = NULL; /* marks this track for cleanup below */
            m_TrackSeq[BestTrack].size--;
            result = 1;
        }   /* Put new blob to output and mark in blob list to delete. */
    }   /* Analyze blod list to find best blob trajectory. */

    {   /* Delete bad tracks: any track without a blob in slot 0 is dead;
         * compact the array by moving the last track into the freed slot. */
        int i;
        for(i=m_TrackNum-1; i>=0; --i)
        {   /* Delete bad tracks: */
            if(m_TrackSeq[i].pBlobs[0]) continue;
            if(m_TrackNum>0) m_TrackSeq[i] = m_TrackSeq[--m_TrackNum];
        }   /* Delete bad tracks: */
    }

#ifdef USE_OBJECT_DETECTOR
    /* Optionally run the object detector over each new blob's ROI and split
     * a blob into several when the detector finds more than one object. */
    if( m_split_detector && pNewBlobList->GetBlobNum() > 0 )
    {
        int num_new_blobs = pNewBlobList->GetBlobNum();
        int i = 0;

        if( m_roi_seq ) cvClearSeq( m_roi_seq );
        m_debug_blob_seq.Clear();
        for( i = 0; i < num_new_blobs; ++i )
        {
            CvBlob* b = pNewBlobList->GetBlob(i);
            CvMat roi_stub;
            CvMat* roi_mat = 0;
            CvMat* scaled_roi_mat = 0;

            CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 0 );
            m_debug_blob_seq.AddBlob(&d_b);

            /* Scale so the blob height maps onto the detector's minimum window. */
            float scale = m_param_roi_scale * m_min_window_size.height / CV_BLOB_WY(b);

            float b_width =   MAX(CV_BLOB_WX(b), m_min_window_size.width / scale)
                            + (m_param_roi_scale - 1.0F) * (m_min_window_size.width / scale)
                            + 2.0F * m_max_border / scale;
            float b_height = CV_BLOB_WY(b) * m_param_roi_scale + 2.0F * m_max_border / scale;

            /* ROI centered on the blob, clipped to the image bounds. */
            CvRect roi = cvRectIntersection( cvRect( cvFloor(CV_BLOB_X(b) - 0.5F*b_width),
                                                     cvFloor(CV_BLOB_Y(b) - 0.5F*b_height),
                                                     cvCeil(b_width), cvCeil(b_height) ),
                                             cvRect( 0, 0, pImg->width, pImg->height ) );
            if( roi.width <= 0 || roi.height <= 0 )
                continue;

            if( m_roi_seq ) cvSeqPush( m_roi_seq, &roi );

            roi_mat = cvGetSubRect( pImg, &roi_stub, roi );
            scaled_roi_mat = cvCreateMat( cvCeil(scale*roi.height), cvCeil(scale*roi.width), CV_8UC3 );
            cvResize( roi_mat, scaled_roi_mat );

            m_detected_blob_seq.Clear();
            m_split_detector->Detect( scaled_roi_mat, &m_detected_blob_seq );
            cvReleaseMat( &scaled_roi_mat );

            for( int k = 0; k < m_detected_blob_seq.GetBlobNum(); ++k )
            {
                CvDetectedBlob* b = (CvDetectedBlob*) m_detected_blob_seq.GetBlob(k);

                /* scale and shift each detected blob back to the original image coordinates */
                CV_BLOB_X(b) = CV_BLOB_X(b) / scale + roi.x;
                CV_BLOB_Y(b) = CV_BLOB_Y(b) / scale + roi.y;
                CV_BLOB_WX(b) /= scale;
                CV_BLOB_WY(b) /= scale;

                CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 1, b->response );
                m_debug_blob_seq.AddBlob(&d_b);
            }

            if( m_detected_blob_seq.GetBlobNum() > 1 )
            {
                /*
                 * Split blob.
                 * The original blob is replaced by the first detected blob,
                 * remaining detected blobs are added to the end of the sequence:
                 */
                CvBlob* first_b = m_detected_blob_seq.GetBlob(0);
                CV_BLOB_X(b)  = CV_BLOB_X(first_b);  CV_BLOB_Y(b)  = CV_BLOB_Y(first_b);
                CV_BLOB_WX(b) = CV_BLOB_WX(first_b); CV_BLOB_WY(b) = CV_BLOB_WY(first_b);

                for( int j = 1; j < m_detected_blob_seq.GetBlobNum(); ++j )
                {
                    CvBlob* detected_b = m_detected_blob_seq.GetBlob(j);
                    pNewBlobList->AddBlob(detected_b);
                }
            }
        }   /* For each new blob. */

        for( i = 0; i < pNewBlobList->GetBlobNum(); ++i )
        {
            CvBlob* b = pNewBlobList->GetBlob(i);
            CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 2 );
            m_debug_blob_seq.AddBlob(&d_b);
        }
    }   // if( m_split_detector )
#endif

    return result;
}   /* cvDetectNewBlob */
/* cvDetectNewBlobs:
 * Detect a newly-appeared blob by exhaustively matching candidate blobs
 * across the last EBD_FRAME_NUM frames.
 * Returns 1 (and appends the best blob to pNewBlobList) when some
 * combination of one candidate per frame forms a near-linear trajectory;
 * otherwise 0.
 * pFGMask      - foreground mask of the current frame.
 * pNewBlobList - output list for the detected blob.
 * pOldBlobList - blobs already tracked; intersecting candidates are rejected.
 * m_pBlobLists[0..EBD_FRAME_NUM-1] holds per-frame candidates, oldest first
 * (note: opposite ordering to CvBlobDetectorCC).
 */
int CvBlobDetectorSimple::DetectNewBlob(IplImage* /*pImg*/, IplImage* pFGMask, CvBlobSeq* pNewBlobList, CvBlobSeq* pOldBlobList)
{
    int         result = 0;
    CvSize      S = cvSize(pFGMask->width,pFGMask->height);

    /* Lazily allocate the scratch masks on first call. */
    if(m_pMaskBlobNew == NULL ) m_pMaskBlobNew = cvCreateImage(S,IPL_DEPTH_8U,1);
    if(m_pMaskBlobExist == NULL ) m_pMaskBlobExist = cvCreateImage(S,IPL_DEPTH_8U,1);

    /* Shift blob list: drop the oldest, make room at the newest slot. */
    {
        int     i;
        if(m_pBlobLists[0]) delete m_pBlobLists[0];
        for(i=1;i<EBD_FRAME_NUM;++i)m_pBlobLists[i-1]=m_pBlobLists[i];
        m_pBlobLists[EBD_FRAME_NUM-1] = new CvBlobSeq;
    }   /* Shift blob list. */

    /* Create exist blob mask: */
    cvCopy(pFGMask, m_pMaskBlobNew);

    /* Create contours and add new blobs to blob list: */
    {   /* Create blobs: */
        CvBlobSeq       Blobs;
        CvMemStorage*   storage = cvCreateMemStorage();

#if 1
        {   /* Glue contours: cluster contours into blobs. */
            cvFindBlobsByCCClasters(m_pMaskBlobNew, &Blobs, storage );
        }   /* Glue contours. */
#else
        { /* Alternative (disabled): one blob per external contour. */
            IplImage*       pIB = cvCloneImage(m_pMaskBlobNew);
            CvSeq*          cnts = NULL;
            CvSeq*          cnt = NULL;
            cvThreshold(pIB,pIB,128,255,CV_THRESH_BINARY);
            cvFindContours(pIB,storage, &cnts, sizeof(CvContour), CV_RETR_EXTERNAL);

            /* Process each contour: */
            for(cnt = cnts; cnt; cnt=cnt->h_next)
            {
                CvBlob  NewBlob;

                /* Image moments: */
                double      M00,X,Y,XX,YY;
                CvMoments   m;
                CvRect      r = ((CvContour*)cnt)->rect;
                CvMat       mat;

                /* Reject contours smaller than 2% of frame size per axis. */
                if(r.height < S.height*0.02 || r.width < S.width*0.02) continue;

                cvMoments( cvGetSubRect(m_pMaskBlobNew,&mat,r), &m, 0 );
                M00 = cvGetSpatialMoment( &m, 0, 0 );

                if(M00 <= 0 ) continue;

                X = cvGetSpatialMoment( &m, 1, 0 )/M00;
                Y = cvGetSpatialMoment( &m, 0, 1 )/M00;

                XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
                YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;

                /* Blob size = 4*sigma per axis. */
                NewBlob = cvBlob(r.x+(float)X,r.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY)));

                Blobs.AddBlob(&NewBlob);

            }   /* Next contour. */

            cvReleaseImage(&pIB);

        }   /* One contour - one blob. */
#endif

        {   /* Delete small and intersected blobs: */
            int i;
            for(i=Blobs.GetBlobNum(); i>0; i--)
            {
                CvBlob* pB = Blobs.GetBlob(i-1);

                if(pB->h < S.height*0.02 || pB->w < S.width*0.02)
                {
                    Blobs.DelBlob(i-1);
                    continue;
                }

                if(pOldBlobList)
                {
                    int     j;
                    for(j=pOldBlobList->GetBlobNum(); j>0; j--)
                    {
                        CvBlob* pBOld = pOldBlobList->GetBlob(j-1);
                        /* Axis-aligned overlap test against half-extents. */
                        if((fabs(pBOld->x-pB->x) < (CV_BLOB_RX(pBOld)+CV_BLOB_RX(pB))) &&
                           (fabs(pBOld->y-pB->y) < (CV_BLOB_RY(pBOld)+CV_BLOB_RY(pB))))
                        {   /* Intersection is present, so delete blob from list: */
                            Blobs.DelBlob(i-1);
                            break;
                        }
                    }   /* Check next old blob. */
                }   /* if pOldBlobList */
            }   /* Check next blob. */
        }   /* Delete small and intersected blobs. */

        {   /* Bubble-sort blobs by size (descending area): */
            int N = Blobs.GetBlobNum();
            int i,j;
            for(i=1; i<N; ++i)
            {
                for(j=i; j>0; --j)
                {
                    CvBlob  temp;
                    float   AreaP, AreaN;
                    CvBlob* pP = Blobs.GetBlob(j-1);
                    CvBlob* pN = Blobs.GetBlob(j);
                    AreaP = CV_BLOB_WX(pP)*CV_BLOB_WY(pP);
                    AreaN = CV_BLOB_WX(pN)*CV_BLOB_WY(pN);
                    if(AreaN < AreaP)break;
                    /* Swap blob records in place. */
                    temp = pN[0];
                    pN[0] = pP[0];
                    pP[0] = temp;
                }
            }

            /* Copy only first 10 blobs: cap the per-frame candidate count. */
            for(i=0; i<MIN(N,10); ++i)
            {
                m_pBlobLists[EBD_FRAME_NUM-1]->AddBlob(Blobs.GetBlob(i));
            }
        }   /* Sort blobs by size. */

        cvReleaseMemStorage(&storage);
    }   /* Create blobs. */

    /* Analyze blob list to find best blob trajectory: enumerate every
     * combination of one candidate per frame (odometer over pBLIndex) and
     * keep the combination with the smallest linear-fit error. */
    {
        int     Count = 0;
        int     pBLIndex[EBD_FRAME_NUM];
        int     pBL_BEST[EBD_FRAME_NUM];
        int     i;
        int     finish = 0;
        double  BestError = -1;
        int     Good = 1;

        for(i=0; i<EBD_FRAME_NUM; ++i)
        {
            pBLIndex[i] = 0;
            pBL_BEST[i] = 0;
        }

        /* Check configuration exist: every frame list must be non-empty. */
        for(i=0; Good && (i<EBD_FRAME_NUM); ++i)
            if(m_pBlobLists[i] == NULL || m_pBlobLists[i]->GetBlobNum() == 0) Good = 0;

        if(Good)
        do{ /* For each configuration: */
            CvBlob* pBL[EBD_FRAME_NUM];
            int     Good = 1; /* deliberately shadows the outer Good for this configuration */
            double  Error = 0;
            CvBlob* pBNew = m_pBlobLists[EBD_FRAME_NUM-1]->GetBlob(pBLIndex[EBD_FRAME_NUM-1]);

            for(i=0; i<EBD_FRAME_NUM; ++i)  pBL[i] = m_pBlobLists[i]->GetBlob(pBLIndex[i]);

            Count++;

            /* Check intersection last blob with existed: */
            if(Good && pOldBlobList)
            {   /* Check intersection last blob with existed: */
                int k;
                for(k=pOldBlobList->GetBlobNum(); k>0; --k)
                {
                    CvBlob* pBOld = pOldBlobList->GetBlob(k-1);
                    if((fabs(pBOld->x-pBNew->x) < (CV_BLOB_RX(pBOld)+CV_BLOB_RX(pBNew))) &&
                       (fabs(pBOld->y-pBNew->y) < (CV_BLOB_RY(pBOld)+CV_BLOB_RY(pBNew))))
                        Good = 0;
                }
            }   /* Check intersection last blob with existed. */

            /* Check distance to image border: */
            if(Good)
            {   /* Check distance to image border: */
                CvBlob* pB = pBNew;
                float dx = MIN(pB->x,S.width-pB->x)/CV_BLOB_RX(pB);
                float dy = MIN(pB->y,S.height-pB->y)/CV_BLOB_RY(pB);

                if(dx < 1.1 || dy < 1.1) Good = 0;
            }   /* Check distance to image border. */

            /* Check uniform motion: least-squares line fit x(t)=a*t+b per axis. */
            if(Good)
            {
                int     N = EBD_FRAME_NUM;
                float   sum[2] = {0,0};
                float   jsum[2] = {0,0};
                float   a[2],b[2]; /* estimated parameters of moving x(t) = a*t+b*/
                int     j;

                for(j=0; j<N; ++j)
                {
                    float   x = pBL[j]->x;
                    float   y = pBL[j]->y;
                    sum[0] += x;
                    jsum[0] += j*x;
                    sum[1] += y;
                    jsum[1] += j*y;
                }

                /* Closed-form least-squares slope/intercept for t = 0..N-1. */
                a[0] = 6*((1-N)*sum[0]+2*jsum[0])/(N*(N*N-1));
                b[0] = -2*((1-2*N)*sum[0]+3*jsum[0])/(N*(N+1));
                a[1] = 6*((1-N)*sum[1]+2*jsum[1])/(N*(N*N-1));
                b[1] = -2*((1-2*N)*sum[1]+3*jsum[1])/(N*(N+1));

                for(j=0; j<N; ++j)
                {
                    Error +=
                        pow(a[0]*j+b[0]-pBL[j]->x,2)+
                        pow(a[1]*j+b[1]-pBL[j]->y,2);
                }

                Error = sqrt(Error/N); /* RMS residual of the linear fit */

                if( Error > S.width*0.01 ||
                    fabs(a[0])>S.width*0.1 ||
                    fabs(a[1])>S.height*0.1)
                    Good = 0;

            }   /* Check configuration. */

            /* New best trajectory: */
            if(Good && (BestError == -1 || BestError > Error))
            {
                for(i=0; i<EBD_FRAME_NUM; ++i)
                {
                    pBL_BEST[i] = pBLIndex[i];
                }
                BestError = Error;
            }   /* New best trajectory. */

            /* Set next configuration: increment the index odometer. */
            for(i=0; i<EBD_FRAME_NUM; ++i)
            {
                pBLIndex[i]++;
                if(pBLIndex[i] != m_pBlobLists[i]->GetBlobNum()) break;
                pBLIndex[i]=0;
            }   /* Next time shift. */

            if(i==EBD_FRAME_NUM)finish=1; /* odometer wrapped: all combinations tried */

        } while(!finish);   /* Check next time configuration of connected components. */

        #if 0
        {/* Debug: dump candidate counts per frame list. */
            printf("BlobDetector configurations = %d [",Count);
            int i;
            for(i=0; i<EBD_FRAME_NUM; ++i)
            {
                printf("%d,",m_pBlobLists[i]?m_pBlobLists[i]->GetBlobNum():0);
            }
            printf("]\n");
        }
        #endif

        if(BestError != -1)
        {   /* Write new blob to output and delete from blob list: */
            CvBlob* pNewBlob = m_pBlobLists[EBD_FRAME_NUM-1]->GetBlob(pBL_BEST[EBD_FRAME_NUM-1]);
            pNewBlobList->AddBlob(pNewBlob);

            for(i=0; i<EBD_FRAME_NUM; ++i)
            {   /* Remove blob from each list: */
                m_pBlobLists[i]->DelBlob(pBL_BEST[i]);
            }   /* Remove blob from each list. */

            result = 1;
        }   /* Write new blob to output and delete from blob list. */
    }   /* Analyze blob list to find best blob trajectory. */

    return result;
}   /* cvDetectNewBlob */
/* Java_org_siprop_opencv_OpenCV_faceDetect:
 * JNI entry point. Takes two RGB frames (background in photo_data1, current
 * frame in photo_data2), isolates skin/face-colored moving pixels by ANDing a
 * hue band-pass with the background difference, draws a red circle at the
 * centroid of that mask on the current frame, and returns the encoded frame
 * bytes packed into a jbooleanArray (one byte per element). Returns 0/null on
 * failure.
 *
 * Fixes vs. the previous revision:
 *  - the first frame's IplImage is released before the pointer is reused
 *    (it used to leak on every call);
 *  - both GetIntArrayElements results are released (the Java arrays used to
 *    stay pinned/copied forever);
 *  - centroid division is guarded against m_00 == 0 (empty mask used to
 *    divide by zero);
 *  - all images and the byte stream are released on every exit path,
 *    including the early-error returns.
 */
JNIEXPORT jbooleanArray JNICALL Java_org_siprop_opencv_OpenCV_faceDetect(JNIEnv* env,
                                                                         jobject thiz,
                                                                         jintArray photo_data1,
                                                                         jintArray photo_data2,
                                                                         jint width,
                                                                         jint height) {
    LOGV("Load desp.");

    /* All images are created up front so a single cleanup block below can
     * release them on every exit path. */
    IplImage *frameImage       = NULL; /* owned; released before reuse and at exit */
    IplImage *backgroundImage  = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
    IplImage *grayImage        = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
    IplImage *differenceImage  = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
    IplImage *hsvImage         = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 3 );
    IplImage *hueImage         = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
    IplImage *saturationImage  = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
    IplImage *valueImage       = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
    IplImage *thresholdImage1  = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
    IplImage *thresholdImage2  = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
    IplImage *thresholdImage3  = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
    IplImage *faceImage        = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
    WLNonFileByteStream* m_strm = NULL;
    jbooleanArray res_array = 0;

    do {
        /* First frame: grayscale background reference. */
        int* pixels = env->GetIntArrayElements(photo_data1, 0);
        frameImage = loadPixels(pixels, width, height);
        /* NOTE(review): assumes loadPixels copies the pixel buffer into the
         * IplImage — confirm before relying on releasing the array here.
         * JNI_ABORT: we only read the pixels, no copy-back needed. */
        env->ReleaseIntArrayElements(photo_data1, pixels, JNI_ABORT);
        if(frameImage == 0) {
            LOGV("Error loadPixels.");
            break;
        }
        cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );
        cvReleaseImage( &frameImage ); /* previously leaked when the pointer was reused */

        /* Second frame: the one we analyze and return. */
        pixels = env->GetIntArrayElements(photo_data2, 0);
        frameImage = loadPixels(pixels, width, height);
        env->ReleaseIntArrayElements(photo_data2, pixels, JNI_ABORT);
        if(frameImage == 0) {
            LOGV("Error loadPixels.");
            break;
        }

        /* Motion cue: absolute difference against the background frame. */
        cvCvtColor( frameImage, grayImage, CV_BGR2GRAY );
        cvAbsDiff( grayImage, backgroundImage, differenceImage );

        /* Color cue: band-pass on the hue channel. */
        cvCvtColor( frameImage, hsvImage, CV_BGR2HSV );
        LOGV("Load cvCvtColor.");
        cvSplit( hsvImage, hueImage, saturationImage, valueImage, 0 );
        LOGV("Load cvSplit.");
        cvThreshold( hueImage, thresholdImage1, THRESH_BOTTOM, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
        cvThreshold( hueImage, thresholdImage2, THRESH_TOP, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY_INV );
        cvAnd( thresholdImage1, thresholdImage2, thresholdImage3, 0 );
        LOGV("Load cvAnd.");

        /* Face mask = moving pixels within the hue band. */
        cvAnd( differenceImage, thresholdImage3, faceImage, 0 );

        /* Centroid of the mask from first-order moments; guard against an
         * empty mask (m_00 == 0 used to cause a division by zero). */
        CvMoments moment;
        cvMoments( faceImage, &moment, 0 );
        double m_00 = cvGetSpatialMoment( &moment, 0, 0 );
        if(m_00 > 0) {
            int gravityX = (int)(cvGetSpatialMoment( &moment, 1, 0 ) / m_00);
            int gravityY = (int)(cvGetSpatialMoment( &moment, 0, 1 ) / m_00);
            cvCircle( frameImage, cvPoint( gravityX, gravityY ), CIRCLE_RADIUS,
                      CV_RGB( 255, 0, 0 ), LINE_THICKNESS, LINE_TYPE, 0 );
        }
        LOGV("Load cvMoments.");

        /* Serialize the annotated frame into a byte stream. */
        CvMat stub;
        CvMat* mat_image = cvGetMat( frameImage, &stub );
        int channels  = CV_MAT_CN( mat_image->type );
        int ipl_depth = cvCvToIplDepth( mat_image->type );

        m_strm = new WLNonFileByteStream();
        loadImageBytes(mat_image->data.ptr, mat_image->step, mat_image->width,
                       mat_image->height, ipl_depth, channels, m_strm);
        LOGV("Load loadImageBytes.");

        /* Copy the stream into a Java boolean[] (one byte per element). */
        int imageSize = m_strm->GetSize();
        res_array = env->NewBooleanArray(imageSize);
        LOGV("Load NewByteArray.");
        if(res_array == 0) {
            break; /* allocation failed; cleanup below still runs */
        }
        env->SetBooleanArrayRegion(res_array, 0, imageSize, (jboolean*)m_strm->GetByte());
        LOGV("Load SetBooleanArrayRegion.");
    } while(0);

    /* Single cleanup path for every exit (success and failure alike). */
    cvReleaseImage( &backgroundImage );
    cvReleaseImage( &grayImage );
    cvReleaseImage( &differenceImage );
    cvReleaseImage( &hsvImage );
    cvReleaseImage( &hueImage );
    cvReleaseImage( &saturationImage );
    cvReleaseImage( &valueImage );
    cvReleaseImage( &thresholdImage1 );
    cvReleaseImage( &thresholdImage2 );
    cvReleaseImage( &thresholdImage3 );
    cvReleaseImage( &faceImage );
    if(frameImage) {
        cvReleaseImage( &frameImage );
    }
    if(m_strm) {
        m_strm->Close();
        SAFE_DELETE(m_strm);
    }

    return res_array;
}