static COMMAND_FUNC( do_centroid )
{
	OpenCV_Seq *ocv_seq_p;

	ocv_seq_p = PICK_OCV_SEQ("sequence");
	if( ocv_seq_p == NO_OPENCV_SEQ ) return;

	if( ocv_seq_p->ocv_seq == NULL ) {
		sprintf(ERROR_STRING, "do_centroid: sequence is NULL.");
		WARN(ERROR_STRING);
		return;
	}

	CvMoments moments;
	double M00, M01, M10;
	float x, y;

	cvMoments(ocv_seq_p->ocv_seq, &moments, 1);
	M00 = cvGetSpatialMoment(&moments, 0, 0);
	M10 = cvGetSpatialMoment(&moments, 1, 0);
	M01 = cvGetSpatialMoment(&moments, 0, 1);

	/* The centroid is the ratio of the first-order moments to the area;
	 * keep float precision instead of truncating through an int cast. */
	x = M10 / M00;
	y = M01 / M00;

	/*
	char msg[30];
	sprintf(msg, "M00 = %f, M10 = %f, M01 = %f", M00, M10, M01);
	WARN(msg);
	*/

	char number[30];
	sprintf(number, "%f", x);
	ASSIGN_VAR("centroid_x", number);
	sprintf(number, "%f", y);
	ASSIGN_VAR("centroid_y", number);
}
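/* A minimal, self-contained sketch of the same centroid-from-moments idea,
 * with a guard for the degenerate m00 == 0 case that do_centroid above does
 * not handle. Assumes `mask` is an 8-bit single-channel binary image; the
 * function name and out-parameters are illustrative, not from the original
 * source. */
static int get_centroid(IplImage *mask, float *cx, float *cy)
{
	CvMoments m;
	cvMoments(mask, &m, 1);                      /* 1 = treat input as binary */
	double m00 = cvGetSpatialMoment(&m, 0, 0);   /* zeroth moment = area */
	if (m00 == 0.0) return 0;                    /* empty mask: no centroid */
	*cx = (float)(cvGetSpatialMoment(&m, 1, 0) / m00);
	*cy = (float)(cvGetSpatialMoment(&m, 0, 1) / m00);
	return 1;
}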
IplImage* threshImage(IplImage *imgOrig, CvScalar lower, CvScalar upper, int n)
{
	IplImage* imgHSV = cvCreateImage(cvGetSize(imgOrig), 8, 3); // size, depth, channels
	cvCvtColor(imgOrig, imgHSV, CV_BGR2HSV); // check!

	IplImage* imgThresh = cvCreateImage(cvGetSize(imgOrig), 8, 1);
	cvInRangeS(imgHSV, lower, upper, imgThresh);

	CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
	cvMoments(imgThresh, moments, 1);
	double moment10 = cvGetSpatialMoment(moments, 1, 0);
	double moment01 = cvGetSpatialMoment(moments, 0, 1);
	double area = cvGetSpatialMoment(moments, 0, 0);

	static int posX = 0;
	static int posY = 0;
	if (area > 0) {          // avoid division by zero on an empty mask
		posX = moment10/area;
		posY = moment01/area;
	}

	int curX = posX * XRATIO;
	int curY = posY * YRATIO;
	SetCursorPos(1366 - curX, curY);

	free(moments);           // allocated with malloc, so free (not delete)
	cvReleaseImage(&imgHSV);
	return imgThresh;
}
/**
 * Draw a circle in the middle of found areas
 * @param frame  Image to draw on
 * @param thresh Image to get moments from
 */
void ColorTracking::DrawPoint(IplImage *frame, IplImage *thresh)
{
	// NONE OF THIS IS USED AT THE MOMENT - THIS IS FOR REFERENCE

	// Calculate the moments to estimate the position of the items
	CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
	cvMoments(thresh, moments, 1);

	// The actual moment values
	double moment10 = cvGetSpatialMoment(moments, 1, 0);
	double moment01 = cvGetSpatialMoment(moments, 0, 1);
	double area = cvGetCentralMoment(moments, 0, 0);

	// Holding the last and current ball positions
	static int posX = 0;
	static int posY = 0;
	int lastX = posX;
	int lastY = posY;
	posX = moment10/area;
	posY = moment01/area;

	// We want to draw only if it is a valid position
	if (lastX > 0 && lastY > 0 && posX > 0 && posY > 0)
	{
		// Just add a circle to the frame when it finds the color
		//addObjectToVideo(frame, cvPoint(posX, posY));
		// find x distance from middle to object
		drawWidthDiff(frame, cvPoint(posX, posY), middle);
	}

	free(moments);   // allocated with malloc, so free (not delete)
}
IplImage* getMoment(IplImage* img)
{
	// Calculate position
	CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
	cvMoments(img, moments, 1);   // was imgRed; use the function argument
	double moment10 = cvGetSpatialMoment(moments, 1, 0);
	double moment01 = cvGetSpatialMoment(moments, 0, 1);
	double area = cvGetCentralMoment(moments, 0, 0);
	free(moments);                // was never freed in the original

	static int posX = 0;
	static int posY = 0;
	int lastX = posX;
	int lastY = posY;
	posX = moment10/area;
	posY = moment01/area;

	CvPoint center = cvPoint(lastX, lastY);  // stack value instead of a leaked `new CvPoint`
	printf("lastX %d\n", lastX);
	printf("lastY %d\n", lastY);

	if (lastX != posX && lastY != posY)
	{
		cvDrawCircle(img, center, 25, CV_RGB(10,10,255), -1);
	}
	return img;   // the original returned 0, discarding the annotated image
}
void trackObject(IplImage* imgThresh)
{
	// Calculate the moments of 'imgThresh'
	CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
	cvMoments(imgThresh, moments, 1);
	double moment10 = cvGetSpatialMoment(moments, 1, 0);
	double moment01 = cvGetSpatialMoment(moments, 0, 1);
	double area = cvGetCentralMoment(moments, 0, 0);

	// If area < 100 the mask is treated as noise: there is no object in the
	// image, even though the area is not exactly zero.
	if (area > 100) {
		// calculate the position of the ball
		int posX = moment10/area;
		int posY = moment01/area;

		if (debug > 1 && lastX >= 0 && lastY >= 0 && posX >= 0 && posY >= 0)
		{
			// Draw a red (BGR 0,0,255) line from the previous point to the current point
			cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,0,255), 4);
		}
		lastX = posX;
		lastY = posY;
		printf("%+05d %+05d\n", posX - halfWidth, posY - halfHeight);
	} else {
		printf("***** *****\n");
	}
	free(moments);
}
void Segment::updateContour()
{
	contour = 0;
	cvFindContours(iplMask, storage, &contour, sizeof(CvContour),
	               CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

	CvMoments moments;
	// CvSeq* c = contour;
	//cerr << "-------------------------- " << c << endl;
	/*for( CvSeq* c = contour; c!=NULL; c=c->h_next ){
		for(int i = 0; i < c->total; i++){
			CvPoint* p = CV_GET_SEQ_ELEM( CvPoint, c, i );
			cerr << p->x << "," << p->y << endl;
		}
	}
	*/
	cvMoments(contour, &moments);

	double m00, m10, m01;
	m00 = cvGetSpatialMoment(&moments, 0, 0);
	m10 = cvGetSpatialMoment(&moments, 1, 0);
	m01 = cvGetSpatialMoment(&moments, 0, 1);

	if (m00 != 0) {   // an empty or degenerate contour has zero area
		float center_x = m10/m00;
		float center_y = m01/m00;
		centroid = cvPoint(center_x, center_y);
	}
}
void trackObject(IplImage* imgThresh)
{
	// Calculate the moments of the converted image
	CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
	cvMoments(imgThresh, moments, 1);
	double moment10 = cvGetSpatialMoment(moments, 1, 0);
	double moment01 = cvGetSpatialMoment(moments, 0, 1);
	double area = cvGetCentralMoment(moments, 0, 0);

	// If the area is < 1000, I assume there is no object in the image, only
	// noise; the area is small but not zero.
	if (area > 1000) {
		// Compute the object position
		int posX = moment10/area;
		int posY = moment01/area;

		if (lastX >= 0 && lastY >= 0 && posX >= 0 && posY >= 0)
		{
			// Draw a line, colored according to the HSV values from the
			// configuration, from the previous point to the current one
			cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX, lastY),
			       cvScalar(lowH, lowS, lowV), 4);
		}
		lastX = posX;
		lastY = posY;
	}
	free(moments);
}
void tracker_bitmap_color::run()
{
	vsx_bitmap *bmp = in_bitmap->get_addr();

	// Check if there is any new image to process
	if(!(bmp && bmp->valid && bmp->timestamp && bmp->timestamp != m_previousTimestamp)) {
#ifdef VSXU_DEBUG
		printf("Skipping frame after %d \n", m_previousTimestamp);
#endif
		return;
	}
	m_previousTimestamp = bmp->timestamp;

	initialize_buffers(bmp->size_x, bmp->size_y);

	// Grab the input image
	m_img[FILTER_NONE]->imageData = (char*)bmp->data;

	// 1) Convert the image to the HSV color space
	cvCvtColor(m_img[FILTER_NONE], m_img[FILTER_HSV], CV_RGB2HSV);

	// 2) Threshold the image based on the supplied range of colors
	cvInRangeS(
		m_img[FILTER_HSV],
		cvScalar((int)(in_color1->get(0)*255), (int)(in_color1->get(1)*255), (int)(in_color1->get(2)*255)),
		cvScalar((int)(in_color2->get(0)*255), (int)(in_color2->get(1)*255), (int)(in_color2->get(2)*255)),
		m_img[FILTER_HSV_THRESHOLD]
	);

	// 3) Now the math to find the centroid of the thresholded image
	// 3.1) Get the moments
	cvMoments(m_img[FILTER_HSV_THRESHOLD], m_moments, 1);
	double moment10 = cvGetSpatialMoment(m_moments, 1, 0);
	double moment01 = cvGetSpatialMoment(m_moments, 0, 1);
	double area = cvGetCentralMoment(m_moments, 0, 0);

	// 3.2) Calculate the positions
	double posX = moment10/area;
	double posY = moment01/area;

	// 3.3) Normalize the positions
	posX = posX / bmp->size_x;
	posY = posY / bmp->size_y;

	// Finally set the result
#ifdef VSXU_DEBUG
	printf("Position: (%f,%f)\n", posX, posY);
#endif
	out_centroid->set(posX, 0);
	out_centroid->set(posY, 1);

	// Calculate the debug output only if requested
	if(m_compute_debug_out) {
		m_compute_debug_out = false;
		cvCvtColor(m_img[FILTER_HSV_THRESHOLD], m_img[FILTER_HSV_THRESHOLD_RGB], CV_GRAY2RGB);
		m_debug = *bmp;
		m_debug.data = m_img[FILTER_HSV_THRESHOLD_RGB]->imageData;
		out_debug->set_p(m_debug);
	}
}
/*
 * call-seq:
 *   gravity_center -> cvpoint2d32f
 *
 * Return gravity center.
 */
VALUE
rb_gravity_center(VALUE self)
{
  CvMoments *moments = CVMOMENTS(self);
  double m00 = cvGetSpatialMoment(moments, 0, 0),
         m10 = cvGetSpatialMoment(moments, 1, 0),
         m01 = cvGetSpatialMoment(moments, 0, 1);
  return cCvPoint2D32f::new_object(cvPoint2D32f(m10 / m00, m01 / m00));
}
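/* For comparison, the same gravity-center computation with the modern OpenCV
 * C++ API, which replaces the legacy CvMoments struct and the
 * cvGetSpatialMoment() accessors. A minimal sketch, assuming `mask` is a
 * single-channel binary cv::Mat; the function name is illustrative. */
#include <opencv2/imgproc.hpp>

cv::Point2f gravityCenter(const cv::Mat &mask)
{
    cv::Moments m = cv::moments(mask, /*binaryImage=*/true);
    if (m.m00 == 0.0)
        return cv::Point2f(-1.f, -1.f);   // empty-mask sentinel
    return cv::Point2f(static_cast<float>(m.m10 / m.m00),
                       static_cast<float>(m.m01 / m.m00));
}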
void getObjectCentre(IplImage* img, int* posX, int *posY)
{
	// Calculate the moments to estimate the position of the ball
	CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
	cvMoments(img, moments, 1);

	// The actual moment values
	double moment10 = cvGetSpatialMoment(moments, 1, 0);
	double moment01 = cvGetSpatialMoment(moments, 0, 1);
	double area = cvGetCentralMoment(moments, 0, 0);

	*posX = moment10/area;
	*posY = moment01/area;

	free(moments);   // allocated with malloc, so free (not delete)
}
void ThreadAnalyse::run()
{
	int img_step = field->img->widthStep;
	int img_channels = field->img->nChannels;
	uchar * img_data = (uchar *) field->img->imageData;
	char key = -1;

	field->treat->origin = field->img->origin;
	int filtered_step = field->treat->widthStep;
	int filtered_channels = field->treat->nChannels;
	uchar * filtered_data_b = (uchar *) field->treat->imageData;

	// Binarize: keep pixels inside the bounding box whose BGR values fall
	// inside the configured color range
	for (int i = 0; i < field->img->width; i++) {
		for (int j = 0; j < field->img->height; j++) {
			if (i > field->box[XMIN] && i < field->box[XMAX]
			    && j > field->box[YMIN] && j < field->box[YMAX]
			    && (img_data[j * img_step + i * img_channels]) > field->color[MINB]
			    && (img_data[j * img_step + i * img_channels]) < field->color[MAXB]
			    && (img_data[j * img_step + i * img_channels + 1]) > field->color[MING]
			    && (img_data[j * img_step + i * img_channels + 1]) < field->color[MAXG]
			    && (img_data[j * img_step + i * img_channels + 2]) > field->color[MINR]
			    && (img_data[j * img_step + i * img_channels + 2]) < field->color[MAXR]) {
				filtered_data_b[j * filtered_step + i * filtered_channels] = 255;
			} else {
				filtered_data_b[j * filtered_step + i * filtered_channels] = 0;
			}
		}
	}
	//cvErode(field->treat, field->treat, 0, 1);

	cvMoments(field->treat, moments, 1);

	// The actual moment values
	double moment10 = cvGetSpatialMoment(moments, 1, 0);
	double moment01 = cvGetSpatialMoment(moments, 0, 1);
	double area = cvGetCentralMoment(moments, 0, 0);

	// Cast the quotient, not the numerator: `(int) moment10 / area` truncates
	// moment10 to int before dividing
	field->position->x = (int) (moment10 / area);
	field->position->y = (int) (moment01 / area);

	pthread_exit(NULL);
}
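/* The hand-rolled per-pixel loop above reimplements what the library's range
 * threshold already does. A minimal sketch of the equivalent call, assuming
 * field->img is 3-channel BGR and field->treat a 1-channel mask; note the
 * boundary semantics differ slightly (cvInRangeS tests lower <= v < upper,
 * the loop uses strict inequalities), and the bounding-box restriction would
 * still need cvSetImageROI or the explicit loop: */
cvInRangeS(field->img,
           cvScalar(field->color[MINB], field->color[MING], field->color[MINR]),
           cvScalar(field->color[MAXB], field->color[MAXG], field->color[MAXR]),
           field->treat);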
/****************************************************************
** fit_params ***************************************************
****************************************************************/
CFitParamsReturnType fit_params( CvMoments *pState,
                                 double *x0, double *y0,
                                 double *Mu00,
                                 double *Uu11, double *Uu20, double *Uu02,
                                 int width, int height,
                                 unsigned char *img, int img_step )
{
  double Mu10, Mu01;
  double val;
  CvMat arr;

  arr = fi2cv_view(img, img_step, width, height);

  /* get moments */
  int binary = 0; /* not a binary image */
  CHKCV( cvMoments( &arr, pState, binary ), CFitParamsOtherError);

  /* calculate center of gravity from spatial moments */
  CHKCV( val = cvGetSpatialMoment( pState, 0, 0), CFitParamsOtherError);
  *Mu00 = val;
  CHKCV( Mu10 = cvGetSpatialMoment( pState, 1, 0), CFitParamsOtherError);
  CHKCV( Mu01 = cvGetSpatialMoment( pState, 0, 1), CFitParamsOtherError);

  if( *Mu00 != 0.0 ) {
    *x0 = Mu10 / *Mu00;
    *y0 = Mu01 / *Mu00; /* relative to ROI origin */
  } else {
    return CFitParamsZeroMomentError;
  }

  /* calculate blob orientation from central moments */
  CHKCV( val = cvGetCentralMoment( pState, 1, 1), CFitParamsCentralMomentError);
  *Uu11 = val;
  CHKCV( val = cvGetCentralMoment( pState, 2, 0), CFitParamsCentralMomentError);
  *Uu20 = val;
  CHKCV( val = cvGetCentralMoment( pState, 0, 2), CFitParamsCentralMomentError);
  *Uu02 = val;

  return CFitParamsNoError;
}
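/* The three central moments fit_params returns determine the blob's
 * orientation: the major-axis angle of the equivalent ellipse is
 * theta = 0.5 * atan2(2*mu11, mu20 - mu02). A minimal sketch of that
 * follow-up computation; the helper name is illustrative, not from the
 * original source: */
#include <math.h>

double blob_orientation(double mu11, double mu20, double mu02)
{
    /* angle of the principal axis, in radians, measured from the x axis */
    return 0.5 * atan2(2.0 * mu11, mu20 - mu02);
}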
IplImage* contour(IplImage* img)
{
	static CvMemStorage* store = NULL;
	static IplImage* aux = NULL;

	// Allocate the work image and storage once; they are reused on every call.
	// (In the original, aux was a plain local initialized to NULL, so the
	// check was always true and a new image leaked on each call.)
	if(aux == NULL)
	{
		aux = cvCreateImage(cvGetSize(img), 8, 1);
		store = cvCreateMemStorage(0);
	}

	CvSeq * contours = 0;
	cvFindContours(img, store, &contours); // finding contours in an image
	cvZero(aux);
	//if(contours->total)
	{
		cvDrawContours(aux, contours, cvScalarAll(255), cvScalarAll(255), 100);
	}

	CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
	double M00, M01, M10;
	fruitCount = 0;

	while(contours != NULL) // compute the moments, i.e. the centre coords, of each contour
	{
		if( cvContourArea(contours, CV_WHOLE_SEQ) < 5 ) // keep only sizable objects
		{
			contours = contours->h_next;
			continue;
		}
		cvMoments(contours, moments);
		M00 = cvGetSpatialMoment(moments, 0, 0);
		M10 = cvGetSpatialMoment(moments, 1, 0);
		M01 = cvGetSpatialMoment(moments, 0, 1);
		centers[fruitCount].x = (int)(M10/M00); // global array: centre coords of each object
		centers[fruitCount].y = (int)(M01/M00);
		fruitCount++; // global count of detected objects; if it is zero, no action is taken
		contours = contours->h_next;
	}
	free(moments);   // was leaked in the original
	cvClearMemStorage(store);
	return aux;
}
std::vector<CvPoint> CaptureManager::GetTrajectory(int c)
{
	std::vector<CvPoint> traj(frameCount);
	for (int i = 0; i < frameCount; i++) {
		if (Access(i, 0, false, true)->contourArray.size() <= c)
			traj[i] = cvPoint(-1, -1);
		else {
			CvMoments m;
			double m00, m10, m01;
			cvMoments(Access(i, 0, false, true)->contourArray[c], &m);
			m00 = cvGetSpatialMoment(&m, 0, 0);
			m01 = cvGetSpatialMoment(&m, 0, 1);
			m10 = cvGetSpatialMoment(&m, 1, 0);
			// use the values fetched above (the original read the struct
			// fields directly and left these locals unused)
			traj[i] = cvPoint(cvRound(m10/m00), cvRound(m01/m00));
		}
	}
	return traj;
}
void trackObject(IplImage* imgThresh, int player)
{
	// Calculate the moments of 'imgThresh'
	CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
	cvMoments(imgThresh, moments, 1);
	double moment10 = cvGetSpatialMoment(moments, 1, 0);
	double moment01 = cvGetSpatialMoment(moments, 0, 1);
	double area = cvGetCentralMoment(moments, 0, 0);

	// If area < 1000 the mask is treated as noise: there is no object in the
	// image, even though the area is not exactly zero.
	if (area > 1000) {
		if (player == 1) {
			posX1 = moment10/area;
			posY1 = moment01/area;
		}
		if (player == 2) {
			posX2 = moment10/area;
			posY2 = moment01/area;
		}
		lastX1 = posX1;
		lastY1 = posY1;
		lastX2 = posX2;
		lastY2 = posY2;
	}
	free(moments);   // free unconditionally; the original leaked when area <= 1000
}
void track_object( IplImage *imgThresh )
{
	// compute the "moments" in imgThresh
	CvMoments *moments = NULL;
	if( NULL == (moments = (CvMoments *) malloc( sizeof(*moments) )) ) {
		// XXX why the hack "malloc" in cpp??!!!
		QMessageBox::critical(0, QString( "ERROR" ), QString( "Allocation failed." ));
		return;
	}
	cvMoments( imgThresh, moments, 1 );

	double area = cvGetCentralMoment( moments, 0, 0 );
	double moment10 = cvGetSpatialMoment( moments, 1, 0 );
	double moment01 = cvGetSpatialMoment( moments, 0, 1 );
	free( moments );

	/* if the area is <1000, it is considered noise and ignored */
	if( 1000 >= area ) {
		printf("area too small\n");
		return;
	}

	// compute position
	int xpos = moment10 / area;
	int ypos = moment01 / area;

	if( 0 <= xlast && 0 <= ylast && 0 <= xpos && 0 <= ypos ) {
		// draw trace..
		printf("x:y - %d:%d\n", xpos, ypos);
		cvLine( imgTracking, cvPoint( xpos, ypos ), cvPoint( xlast, ylast ),
		        cvScalar( 0, 0, 255 ), 5 );
	}
	xlast = xpos;
	ylast = ypos;
}
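/* Answering the XXX above: no heap allocation is needed at all; CvMoments is
 * a plain struct and can live on the stack. A minimal sketch of the same
 * computation without the malloc/free pair (the helper name is illustrative): */
static double mask_area(IplImage *imgThresh)
{
	CvMoments m;                           /* stack allocation */
	cvMoments(imgThresh, &m, 1);
	return cvGetCentralMoment(&m, 0, 0);   /* mu00 == m00: the mask area */
}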
double BlobContour::GetMoment(int p, int q)
{
	// is it a valid moment?
	if (p < 0 || q < 0 || p > MAX_MOMENTS_ORDER || q > MAX_MOMENTS_ORDER)
	{
		return -1;
	}

	if (IsEmpty())
		return 0;

	// has it been calculated yet? (m00 == -1 marks the cache as stale)
	if (m_moments.m00 == -1)
	{
		cvMoments(GetContourPoints(), &m_moments);
	}
	return cvGetSpatialMoment(&m_moments, p, q);
}
void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
{
    /* Create contours: */
    IplImage*       pIB = NULL;
    CvSeq*          cnt = NULL;
    CvSeq*          cnt_list = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvSeq*), storage);
    CvSeq*          clasters = NULL;
    int             claster_cur, claster_num;

    pIB = cvCloneImage(pFG);
    cvThreshold(pIB, pIB, 128, 255, CV_THRESH_BINARY);
    cvFindContours(pIB, storage, &cnt, sizeof(CvContour), CV_RETR_EXTERNAL);
    cvReleaseImage(&pIB);

    /* Create cnt_list: process each contour: */
    for(; cnt; cnt = cnt->h_next)
    {
        cvSeqPush(cnt_list, &cnt);
    }

    claster_num = cvSeqPartition(cnt_list, storage, &clasters, CompareContour, NULL);

    for(claster_cur = 0; claster_cur < claster_num; ++claster_cur)
    {
        int         cnt_cur;
        CvBlob      NewBlob;
        double      M00, X, Y, XX, YY;  /* image moments */
        CvMoments   m;
        CvRect      rect_res = cvRect(-1, -1, -1, -1);
        CvMat       mat;

        for(cnt_cur = 0; cnt_cur < clasters->total; ++cnt_cur)
        {
            CvRect  rect;
            CvSeq*  cnt;
            int k = *(int*)cvGetSeqElem(clasters, cnt_cur);
            if(k != claster_cur) continue;
            cnt = *(CvSeq**)cvGetSeqElem(cnt_list, cnt_cur);
            rect = ((CvContour*)cnt)->rect;

            if(rect_res.height < 0)
            {
                rect_res = rect;
            }
            else
            {   /* Unite rects: */
                int x0, x1, y0, y1;
                x0 = MIN(rect_res.x, rect.x);
                y0 = MIN(rect_res.y, rect.y);
                x1 = MAX(rect_res.x + rect_res.width, rect.x + rect.width);
                y1 = MAX(rect_res.y + rect_res.height, rect.y + rect.height);
                rect_res.x = x0;
                rect_res.y = y0;
                rect_res.width = x1 - x0;
                rect_res.height = y1 - y0;
            }
        }

        if(rect_res.height < 1 || rect_res.width < 1)
        {
            X = 0; Y = 0; XX = 0; YY = 0;
        }
        else
        {   /* Centroid and per-axis variance from the moments of the united rect: */
            cvMoments(cvGetSubRect(pFG, &mat, rect_res), &m, 0);
            M00 = cvGetSpatialMoment(&m, 0, 0);
            if(M00 <= 0) continue;
            X  = cvGetSpatialMoment(&m, 1, 0) / M00;
            Y  = cvGetSpatialMoment(&m, 0, 1) / M00;
            XX = (cvGetSpatialMoment(&m, 2, 0) / M00) - X * X;
            YY = (cvGetSpatialMoment(&m, 0, 2) / M00) - Y * Y;
        }

        NewBlob = cvBlob(rect_res.x + (float)X, rect_res.y + (float)Y,
                         (float)(4 * sqrt(XX)), (float)(4 * sqrt(YY)));
        pBlobs->AddBlob(&NewBlob);
    }   /* Next cluster. */

#if 0
    {   /* Debug info: */
        IplImage* pI = cvCreateImage(cvSize(pFG->width, pFG->height), IPL_DEPTH_8U, 3);
        cvZero(pI);
        for(claster_cur = 0; claster_cur < claster_num; ++claster_cur)
        {
            int         cnt_cur;
            CvScalar    color = CV_RGB(rand() % 256, rand() % 256, rand() % 256);

            for(cnt_cur = 0; cnt_cur < clasters->total; ++cnt_cur)
            {
                CvSeq* cnt;
                int k = *(int*)cvGetSeqElem(clasters, cnt_cur);
                if(k != claster_cur) continue;
                cnt = *(CvSeq**)cvGetSeqElem(cnt_list, cnt_cur);
                cvDrawContours(pI, cnt, color, color, 0, 1, 8);
            }

            CvBlob* pB = pBlobs->GetBlob(claster_cur);
            int x = cvRound(CV_BLOB_RX(pB)), y = cvRound(CV_BLOB_RY(pB));
            cvEllipse(pI,
                cvPointFrom32f(CV_BLOB_CENTER(pB)),
                cvSize(MAX(1, x), MAX(1, y)),
                0, 0, 360, color, 1);
        }
        cvNamedWindow("Clusters", 0);
        cvShowImage("Clusters", pI);
        cvReleaseImage(&pI);
    }   /* Debug info. */
#endif
}   /* cvFindBlobsByCCClasters */
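/* In the cvBlob() constructor above, XX and YY are the per-axis variances
 * (mu20/m00 and mu02/m00), so 4*sqrt(XX) is four standard deviations: roughly
 * the full extent of a Gaussian-like blob (+/- 2 sigma around the centroid).
 * A minimal standalone sketch of that width estimate, assuming `mask` is an
 * 8-bit single-channel image; the function name is illustrative: */
static float blob_width_from_moments(IplImage *mask)
{
    CvMoments m;
    cvMoments(mask, &m, 0);
    double m00 = cvGetSpatialMoment(&m, 0, 0);
    if (m00 <= 0) return 0.f;
    double x   = cvGetSpatialMoment(&m, 1, 0) / m00;        /* centroid x */
    double var = cvGetSpatialMoment(&m, 2, 0) / m00 - x * x; /* variance along x */
    return (float)(4.0 * sqrt(var));                         /* ~ +/- 2 sigma extent */
}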
/* cvDetectNewBlobs:
 * Return 1 and fill blob pNewBlob with
 * blob parameters if new blob is detected:
 */
int CvBlobDetectorCC::DetectNewBlob(IplImage* /*pImg*/, IplImage* pFGMask, CvBlobSeq* pNewBlobList, CvBlobSeq* pOldBlobList)
{
    int         result = 0;
    CvSize      S = cvSize(pFGMask->width, pFGMask->height);

    /* Shift blob list: */
    {
        int     i;
        if(m_pBlobLists[SEQ_SIZE-1]) delete m_pBlobLists[SEQ_SIZE-1];
        for(i = SEQ_SIZE-1; i > 0; --i)  m_pBlobLists[i] = m_pBlobLists[i-1];
        m_pBlobLists[0] = new CvBlobSeq;
    }   /* Shift blob list. */

    /* Create contours and add new blobs to blob list: */
    {   /* Create blobs: */
        CvBlobSeq       Blobs;
        CvMemStorage*   storage = cvCreateMemStorage();

        if(m_Clastering)
        {   /* Glue contours: */
            cvFindBlobsByCCClasters(pFGMask, &Blobs, storage);
        }   /* Glue contours. */
        else
        { /**/
            IplImage*       pIB = cvCloneImage(pFGMask);
            CvSeq*          cnts = NULL;
            CvSeq*          cnt = NULL;
            cvThreshold(pIB, pIB, 128, 255, CV_THRESH_BINARY);
            cvFindContours(pIB, storage, &cnts, sizeof(CvContour), CV_RETR_EXTERNAL);

            /* Process each contour: */
            for(cnt = cnts; cnt; cnt = cnt->h_next)
            {
                CvBlob  NewBlob;
                /* Image moments: */
                double      M00, X, Y, XX, YY;
                CvMoments   m;
                CvRect      r = ((CvContour*)cnt)->rect;
                CvMat       mat;
                if(r.height < S.height*m_HMin || r.width < S.width*m_WMin) continue;
                cvMoments( cvGetSubRect(pFGMask, &mat, r), &m, 0 );
                M00 = cvGetSpatialMoment( &m, 0, 0 );
                if(M00 <= 0) continue;
                X = cvGetSpatialMoment( &m, 1, 0 ) / M00;
                Y = cvGetSpatialMoment( &m, 0, 1 ) / M00;
                XX = (cvGetSpatialMoment( &m, 2, 0 ) / M00) - X*X;
                YY = (cvGetSpatialMoment( &m, 0, 2 ) / M00) - Y*Y;
                NewBlob = cvBlob(r.x+(float)X, r.y+(float)Y, (float)(4*sqrt(XX)), (float)(4*sqrt(YY)));
                Blobs.AddBlob(&NewBlob);
            }   /* Next contour. */

            cvReleaseImage(&pIB);
        }   /* One contour - one blob. */

        {   /* Delete small and intersected blobs: */
            int i;
            for(i = Blobs.GetBlobNum(); i > 0; i--)
            {
                CvBlob* pB = Blobs.GetBlob(i-1);

                if(pB->h < S.height*m_HMin || pB->w < S.width*m_WMin)
                {
                    Blobs.DelBlob(i-1);
                    continue;
                }
                if(pOldBlobList)
                {
                    int j;
                    for(j = pOldBlobList->GetBlobNum(); j > 0; j--)
                    {
                        CvBlob* pBOld = pOldBlobList->GetBlob(j-1);
                        if((fabs(pBOld->x - pB->x) < (CV_BLOB_RX(pBOld) + CV_BLOB_RX(pB))) &&
                           (fabs(pBOld->y - pB->y) < (CV_BLOB_RY(pBOld) + CV_BLOB_RY(pB))))
                        {   /* Intersection detected, delete blob from list: */
                            Blobs.DelBlob(i-1);
                            break;
                        }
                    }   /* Check next old blob. */
                }   /* if pOldBlobList. */
            }   /* Check next blob. */
        }   /* Delete small and intersected blobs. */

        {   /* Bubble-sort blobs by size: */
            int N = Blobs.GetBlobNum();
            int i, j;
            for(i = 1; i < N; ++i)
            {
                for(j = i; j > 0; --j)
                {
                    CvBlob  temp;
                    float   AreaP, AreaN;
                    CvBlob* pP = Blobs.GetBlob(j-1);
                    CvBlob* pN = Blobs.GetBlob(j);
                    AreaP = CV_BLOB_WX(pP)*CV_BLOB_WY(pP);
                    AreaN = CV_BLOB_WX(pN)*CV_BLOB_WY(pN);
                    if(AreaN < AreaP) break;
                    temp = pN[0];
                    pN[0] = pP[0];
                    pP[0] = temp;
                }
            }

            /* Copy only first 10 blobs: */
            for(i = 0; i < MIN(N, 10); ++i)
            {
                m_pBlobLists[0]->AddBlob(Blobs.GetBlob(i));
            }
        }   /* Sort blobs by size. */

        cvReleaseMemStorage(&storage);
    }   /* Create blobs. */

    {   /* Shift each track: */
        int j;
        for(j = 0; j < m_TrackNum; ++j)
        {
            int     i;
            DefSeq* pTrack = m_TrackSeq + j;

            for(i = SEQ_SIZE-1; i > 0; --i)
                pTrack->pBlobs[i] = pTrack->pBlobs[i-1];

            pTrack->pBlobs[0] = NULL;
            if(pTrack->size == SEQ_SIZE) pTrack->size--;
        }
    }   /* Shift each track. */

    /* Analyze blob list to find best blob trajectory: */
    {
        double      BestError = -1;
        int         BestTrack = -1;
        CvBlobSeq*  pNewBlobs = m_pBlobLists[0];
        int         i;
        int         NewTrackNum = 0;

        for(i = pNewBlobs->GetBlobNum(); i > 0; --i)
        {
            CvBlob* pBNew = pNewBlobs->GetBlob(i-1);
            int     j;
            int     AsignedTrack = 0;

            for(j = 0; j < m_TrackNum; ++j)
            {
                double  dx, dy;
                DefSeq* pTrack = m_TrackSeq + j;
                CvBlob* pLastBlob = pTrack->size > 0 ? pTrack->pBlobs[1] : NULL;
                if(pLastBlob == NULL) continue;
                dx = fabs(CV_BLOB_X(pLastBlob) - CV_BLOB_X(pBNew));
                dy = fabs(CV_BLOB_Y(pLastBlob) - CV_BLOB_Y(pBNew));
                if(dx > 2*CV_BLOB_WX(pLastBlob) || dy > 2*CV_BLOB_WY(pLastBlob)) continue;
                AsignedTrack++;

                if(pTrack->pBlobs[0] == NULL)
                {   /* Fill existed track: */
                    pTrack->pBlobs[0] = pBNew;
                    pTrack->size++;
                }
                else if((m_TrackNum + NewTrackNum) < SEQ_NUM)
                {   /* Duplicate existed track: */
                    m_TrackSeq[m_TrackNum + NewTrackNum] = pTrack[0];
                    m_TrackSeq[m_TrackNum + NewTrackNum].pBlobs[0] = pBNew;
                    NewTrackNum++;
                }
            }   /* Next track. */

            if(AsignedTrack == 0 && (m_TrackNum + NewTrackNum) < SEQ_NUM)
            {   /* Initialize new track: */
                m_TrackSeq[m_TrackNum + NewTrackNum].size = 1;
                m_TrackSeq[m_TrackNum + NewTrackNum].pBlobs[0] = pBNew;
                NewTrackNum++;
            }
        }   /* Next new blob. */

        m_TrackNum += NewTrackNum;

        /* Check each track: */
        for(i = 0; i < m_TrackNum; ++i)
        {
            int     Good = 1;
            DefSeq* pTrack = m_TrackSeq + i;
            CvBlob* pBNew = pTrack->pBlobs[0];
            if(pTrack->size != SEQ_SIZE) continue;
            if(pBNew == NULL) continue;

            /* Check intersection of the last blob with an existing one: */
            if(Good && pOldBlobList)
            {
                int k;
                for(k = pOldBlobList->GetBlobNum(); k > 0; --k)
                {
                    CvBlob* pBOld = pOldBlobList->GetBlob(k-1);
                    if((fabs(pBOld->x - pBNew->x) < (CV_BLOB_RX(pBOld) + CV_BLOB_RX(pBNew))) &&
                       (fabs(pBOld->y - pBNew->y) < (CV_BLOB_RY(pBOld) + CV_BLOB_RY(pBNew))))
                        Good = 0;
                }
            }   /* Check intersection of the last blob with an existing one. */

            /* Check distance to image border: */
            if(Good)
            {
                float dx = MIN(pBNew->x, S.width - pBNew->x) / CV_BLOB_RX(pBNew);
                float dy = MIN(pBNew->y, S.height - pBNew->y) / CV_BLOB_RY(pBNew);
                if(dx < m_MinDistToBorder || dy < m_MinDistToBorder) Good = 0;
            }   /* Check distance to image border. */

            /* Check uniform motion: */
            if(Good)
            {
                double  Error = 0;
                int     N = pTrack->size;
                CvBlob** pBL = pTrack->pBlobs;
                float   sum[2] = {0, 0};
                float   jsum[2] = {0, 0};
                float   a[2], b[2]; /* estimated parameters of motion x(t) = a*t + b */
                int     j;

                for(j = 0; j < N; ++j)
                {
                    float x = pBL[j]->x;
                    float y = pBL[j]->y;
                    sum[0] += x;
                    jsum[0] += j*x;
                    sum[1] += y;
                    jsum[1] += j*y;
                }

                a[0] = 6*((1-N)*sum[0]+2*jsum[0]) / (N*(N*N-1));
                b[0] = -2*((1-2*N)*sum[0]+3*jsum[0]) / (N*(N+1));
                a[1] = 6*((1-N)*sum[1]+2*jsum[1]) / (N*(N*N-1));
                b[1] = -2*((1-2*N)*sum[1]+3*jsum[1]) / (N*(N+1));

                for(j = 0; j < N; ++j)
                {
                    Error += pow(a[0]*j+b[0]-pBL[j]->x, 2) +
                             pow(a[1]*j+b[1]-pBL[j]->y, 2);
                }

                Error = sqrt(Error/N);

                if( Error > S.width*0.01 ||
                    fabs(a[0]) > S.width*0.1 ||
                    fabs(a[1]) > S.height*0.1)
                    Good = 0;

                /* New best trajectory: */
                if(Good && (BestError == -1 || BestError > Error))
                {
                    BestTrack = i;
                    BestError = Error;
                }   /* New best trajectory. */
            }   /* Check uniform motion. */
        }   /* Next track. */

#if 0
        { /**/
            printf("BlobDetector configurations = %d [", m_TrackNum);
            int i;
            for(i = 0; i < SEQ_SIZE; ++i)
            {
                printf("%d,", m_pBlobLists[i] ? m_pBlobLists[i]->GetBlobNum() : 0);
            }
            printf("]\n");
        }
#endif

        if(BestTrack >= 0)
        {   /* Put new blob to output and delete from blob list: */
            assert(m_TrackSeq[BestTrack].size == SEQ_SIZE);
            assert(m_TrackSeq[BestTrack].pBlobs[0]);
            pNewBlobList->AddBlob(m_TrackSeq[BestTrack].pBlobs[0]);
            m_TrackSeq[BestTrack].pBlobs[0] = NULL;
            m_TrackSeq[BestTrack].size--;
            result = 1;
        }   /* Put new blob to output and mark in blob list to delete. */
    }   /* Analyze blob list to find best blob trajectory. */

    {   /* Delete bad tracks: */
        int i;
        for(i = m_TrackNum-1; i >= 0; --i)
        {   /* Delete bad tracks: */
            if(m_TrackSeq[i].pBlobs[0]) continue;
            if(m_TrackNum > 0) m_TrackSeq[i] = m_TrackSeq[--m_TrackNum];
        }   /* Delete bad tracks. */
    }

#ifdef USE_OBJECT_DETECTOR
    if( m_split_detector && pNewBlobList->GetBlobNum() > 0 )
    {
        int num_new_blobs = pNewBlobList->GetBlobNum();
        int i = 0;

        if( m_roi_seq ) cvClearSeq( m_roi_seq );
        m_debug_blob_seq.Clear();
        for( i = 0; i < num_new_blobs; ++i )
        {
            CvBlob* b = pNewBlobList->GetBlob(i);
            CvMat roi_stub;
            CvMat* roi_mat = 0;
            CvMat* scaled_roi_mat = 0;

            CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 0 );
            m_debug_blob_seq.AddBlob(&d_b);

            float scale = m_param_roi_scale * m_min_window_size.height / CV_BLOB_WY(b);

            float b_width =   MAX(CV_BLOB_WX(b), m_min_window_size.width / scale)
                            + (m_param_roi_scale - 1.0F) * (m_min_window_size.width / scale)
                            + 2.0F * m_max_border / scale;
            float b_height = CV_BLOB_WY(b) * m_param_roi_scale + 2.0F * m_max_border / scale;

            CvRect roi = cvRectIntersection( cvRect( cvFloor(CV_BLOB_X(b) - 0.5F*b_width),
                                                     cvFloor(CV_BLOB_Y(b) - 0.5F*b_height),
                                                     cvCeil(b_width), cvCeil(b_height) ),
                                             cvRect( 0, 0, pImg->width, pImg->height ) );
            if( roi.width <= 0 || roi.height <= 0 )
                continue;

            if( m_roi_seq ) cvSeqPush( m_roi_seq, &roi );

            roi_mat = cvGetSubRect( pImg, &roi_stub, roi );
            scaled_roi_mat = cvCreateMat( cvCeil(scale*roi.height), cvCeil(scale*roi.width), CV_8UC3 );
            cvResize( roi_mat, scaled_roi_mat );

            m_detected_blob_seq.Clear();
            m_split_detector->Detect( scaled_roi_mat, &m_detected_blob_seq );
            cvReleaseMat( &scaled_roi_mat );

            for( int k = 0; k < m_detected_blob_seq.GetBlobNum(); ++k )
            {
                CvDetectedBlob* b = (CvDetectedBlob*) m_detected_blob_seq.GetBlob(k);

                /* scale and shift each detected blob back to the original image coordinates */
                CV_BLOB_X(b) = CV_BLOB_X(b) / scale + roi.x;
                CV_BLOB_Y(b) = CV_BLOB_Y(b) / scale + roi.y;
                CV_BLOB_WX(b) /= scale;
                CV_BLOB_WY(b) /= scale;

                CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 1, b->response );
                m_debug_blob_seq.AddBlob(&d_b);
            }

            if( m_detected_blob_seq.GetBlobNum() > 1 )
            {
                /*
                 * Split blob.
                 * The original blob is replaced by the first detected blob,
                 * remaining detected blobs are added to the end of the sequence:
                 */
                CvBlob* first_b = m_detected_blob_seq.GetBlob(0);
                CV_BLOB_X(b)  = CV_BLOB_X(first_b);  CV_BLOB_Y(b)  = CV_BLOB_Y(first_b);
                CV_BLOB_WX(b) = CV_BLOB_WX(first_b); CV_BLOB_WY(b) = CV_BLOB_WY(first_b);

                for( int j = 1; j < m_detected_blob_seq.GetBlobNum(); ++j )
                {
                    CvBlob* detected_b = m_detected_blob_seq.GetBlob(j);
                    pNewBlobList->AddBlob(detected_b);
                }
            }
        }   /* For each new blob. */

        for( i = 0; i < pNewBlobList->GetBlobNum(); ++i )
        {
            CvBlob* b = pNewBlobList->GetBlob(i);
            CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 2 );
            m_debug_blob_seq.AddBlob(&d_b);
        }
    }   // if( m_split_detector )
#endif

    return result;
}   /* cvDetectNewBlob */
/* cvDetectNewBlobs:
 * return 1 and fill blob pNewBlob by blob parameters
 * if new blob is detected:
 */
int CvBlobDetectorSimple::DetectNewBlob(IplImage* /*pImg*/, IplImage* pFGMask, CvBlobSeq* pNewBlobList, CvBlobSeq* pOldBlobList)
{
    int         result = 0;
    CvSize      S = cvSize(pFGMask->width, pFGMask->height);

    if(m_pMaskBlobNew == NULL) m_pMaskBlobNew = cvCreateImage(S, IPL_DEPTH_8U, 1);
    if(m_pMaskBlobExist == NULL) m_pMaskBlobExist = cvCreateImage(S, IPL_DEPTH_8U, 1);

    /* Shift blob list: */
    {
        int     i;
        if(m_pBlobLists[0]) delete m_pBlobLists[0];
        for(i = 1; i < EBD_FRAME_NUM; ++i) m_pBlobLists[i-1] = m_pBlobLists[i];
        m_pBlobLists[EBD_FRAME_NUM-1] = new CvBlobSeq;
    }   /* Shift blob list. */

    /* Create exist blob mask: */
    cvCopy(pFGMask, m_pMaskBlobNew);

    /* Create contours and add new blobs to blob list: */
    {   /* Create blobs: */
        CvBlobSeq       Blobs;
        CvMemStorage*   storage = cvCreateMemStorage();

#if 1
        {   /* Glue contours: */
            cvFindBlobsByCCClasters(m_pMaskBlobNew, &Blobs, storage);
        }   /* Glue contours. */
#else
        { /**/
            IplImage*       pIB = cvCloneImage(m_pMaskBlobNew);
            CvSeq*          cnts = NULL;
            CvSeq*          cnt = NULL;
            cvThreshold(pIB, pIB, 128, 255, CV_THRESH_BINARY);
            cvFindContours(pIB, storage, &cnts, sizeof(CvContour), CV_RETR_EXTERNAL);

            /* Process each contour: */
            for(cnt = cnts; cnt; cnt = cnt->h_next)
            {
                CvBlob  NewBlob;

                /* Image moments: */
                double      M00, X, Y, XX, YY;
                CvMoments   m;
                CvRect      r = ((CvContour*)cnt)->rect;
                CvMat       mat;

                if(r.height < S.height*0.02 || r.width < S.width*0.02) continue;

                cvMoments( cvGetSubRect(m_pMaskBlobNew, &mat, r), &m, 0 );
                M00 = cvGetSpatialMoment( &m, 0, 0 );

                if(M00 <= 0) continue;

                X = cvGetSpatialMoment( &m, 1, 0 ) / M00;
                Y = cvGetSpatialMoment( &m, 0, 1 ) / M00;

                XX = (cvGetSpatialMoment( &m, 2, 0 ) / M00) - X*X;
                YY = (cvGetSpatialMoment( &m, 0, 2 ) / M00) - Y*Y;

                NewBlob = cvBlob(r.x+(float)X, r.y+(float)Y, (float)(4*sqrt(XX)), (float)(4*sqrt(YY)));
                Blobs.AddBlob(&NewBlob);
            }   /* Next contour. */

            cvReleaseImage(&pIB);
        }   /* One contour - one blob. */
#endif

        {   /* Delete small and intersected blobs: */
            int i;
            for(i = Blobs.GetBlobNum(); i > 0; i--)
            {
                CvBlob* pB = Blobs.GetBlob(i-1);

                if(pB->h < S.height*0.02 || pB->w < S.width*0.02)
                {
                    Blobs.DelBlob(i-1);
                    continue;
                }

                if(pOldBlobList)
                {
                    int j;
                    for(j = pOldBlobList->GetBlobNum(); j > 0; j--)
                    {
                        CvBlob* pBOld = pOldBlobList->GetBlob(j-1);
                        if((fabs(pBOld->x - pB->x) < (CV_BLOB_RX(pBOld) + CV_BLOB_RX(pB))) &&
                           (fabs(pBOld->y - pB->y) < (CV_BLOB_RY(pBOld) + CV_BLOB_RY(pB))))
                        {   /* Intersection is present, so delete blob from list: */
                            Blobs.DelBlob(i-1);
                            break;
                        }
                    }   /* Check next old blob. */
                }   /* if pOldBlobList */
            }   /* Check next blob. */
        }   /* Delete small and intersected blobs. */

        {   /* Bubble-sort blobs by size: */
            int N = Blobs.GetBlobNum();
            int i, j;
            for(i = 1; i < N; ++i)
            {
                for(j = i; j > 0; --j)
                {
                    CvBlob  temp;
                    float   AreaP, AreaN;
                    CvBlob* pP = Blobs.GetBlob(j-1);
                    CvBlob* pN = Blobs.GetBlob(j);
                    AreaP = CV_BLOB_WX(pP)*CV_BLOB_WY(pP);
                    AreaN = CV_BLOB_WX(pN)*CV_BLOB_WY(pN);
                    if(AreaN < AreaP) break;
                    temp = pN[0];
                    pN[0] = pP[0];
                    pP[0] = temp;
                }
            }

            /* Copy only first 10 blobs: */
            for(i = 0; i < MIN(N, 10); ++i)
            {
                m_pBlobLists[EBD_FRAME_NUM-1]->AddBlob(Blobs.GetBlob(i));
            }
        }   /* Sort blobs by size. */

        cvReleaseMemStorage(&storage);
    }   /* Create blobs. */

    /* Analyze blob list to find best blob trajectory: */
    {
        int     Count = 0;
        int     pBLIndex[EBD_FRAME_NUM];
        int     pBL_BEST[EBD_FRAME_NUM];
        int     i;
        int     finish = 0;
        double  BestError = -1;
        int     Good = 1;

        for(i = 0; i < EBD_FRAME_NUM; ++i)
        {
            pBLIndex[i] = 0;
            pBL_BEST[i] = 0;
        }

        /* Check that a configuration exists: */
        for(i = 0; Good && (i < EBD_FRAME_NUM); ++i)
            if(m_pBlobLists[i] == NULL || m_pBlobLists[i]->GetBlobNum() == 0)
                Good = 0;

        if(Good)
        do{ /* For each configuration: */
            CvBlob* pBL[EBD_FRAME_NUM];
            int     Good = 1;
            double  Error = 0;
            CvBlob* pBNew = m_pBlobLists[EBD_FRAME_NUM-1]->GetBlob(pBLIndex[EBD_FRAME_NUM-1]);

            for(i = 0; i < EBD_FRAME_NUM; ++i) pBL[i] = m_pBlobLists[i]->GetBlob(pBLIndex[i]);

            Count++;

            /* Check intersection of the last blob with an existing one: */
            if(Good && pOldBlobList)
            {
                int k;
                for(k = pOldBlobList->GetBlobNum(); k > 0; --k)
                {
                    CvBlob* pBOld = pOldBlobList->GetBlob(k-1);
                    if((fabs(pBOld->x - pBNew->x) < (CV_BLOB_RX(pBOld) + CV_BLOB_RX(pBNew))) &&
                       (fabs(pBOld->y - pBNew->y) < (CV_BLOB_RY(pBOld) + CV_BLOB_RY(pBNew))))
                        Good = 0;
                }
            }   /* Check intersection of the last blob with an existing one. */

            /* Check distance to image border: */
            if(Good)
            {
                CvBlob* pB = pBNew;
                float dx = MIN(pB->x, S.width - pB->x) / CV_BLOB_RX(pB);
                float dy = MIN(pB->y, S.height - pB->y) / CV_BLOB_RY(pB);
                if(dx < 1.1 || dy < 1.1) Good = 0;
            }   /* Check distance to image border. */

            /* Check uniform motion: */
            if(Good)
            {
                int     N = EBD_FRAME_NUM;
                float   sum[2] = {0, 0};
                float   jsum[2] = {0, 0};
                float   a[2], b[2]; /* estimated parameters of motion x(t) = a*t + b */
                int     j;

                for(j = 0; j < N; ++j)
                {
                    float x = pBL[j]->x;
                    float y = pBL[j]->y;
                    sum[0] += x;
                    jsum[0] += j*x;
                    sum[1] += y;
                    jsum[1] += j*y;
                }

                a[0] = 6*((1-N)*sum[0]+2*jsum[0]) / (N*(N*N-1));
                b[0] = -2*((1-2*N)*sum[0]+3*jsum[0]) / (N*(N+1));
                a[1] = 6*((1-N)*sum[1]+2*jsum[1]) / (N*(N*N-1));
                b[1] = -2*((1-2*N)*sum[1]+3*jsum[1]) / (N*(N+1));

                for(j = 0; j < N; ++j)
                {
                    Error += pow(a[0]*j+b[0]-pBL[j]->x, 2) +
                             pow(a[1]*j+b[1]-pBL[j]->y, 2);
                }

                Error = sqrt(Error/N);

                if( Error > S.width*0.01 ||
                    fabs(a[0]) > S.width*0.1 ||
                    fabs(a[1]) > S.height*0.1)
                    Good = 0;
            }   /* Check configuration. */

            /* New best trajectory: */
            if(Good && (BestError == -1 || BestError > Error))
            {
                for(i = 0; i < EBD_FRAME_NUM; ++i)
                {
                    pBL_BEST[i] = pBLIndex[i];
                }
                BestError = Error;
            }   /* New best trajectory. */

            /* Set next configuration: */
            for(i = 0; i < EBD_FRAME_NUM; ++i)
            {
                pBLIndex[i]++;
                if(pBLIndex[i] != m_pBlobLists[i]->GetBlobNum()) break;
                pBLIndex[i] = 0;
            }   /* Next time shift. */

            if(i == EBD_FRAME_NUM) finish = 1;

        } while(!finish);   /* Check next time configuration of connected components. */

#if 0
        { /**/
            printf("BlobDetector configurations = %d [", Count);
            int i;
            for(i = 0; i < EBD_FRAME_NUM; ++i)
            {
                printf("%d,", m_pBlobLists[i] ? m_pBlobLists[i]->GetBlobNum() : 0);
            }
            printf("]\n");
        }
#endif

        if(BestError != -1)
        {   /* Write new blob to output and delete from blob list: */
            CvBlob* pNewBlob = m_pBlobLists[EBD_FRAME_NUM-1]->GetBlob(pBL_BEST[EBD_FRAME_NUM-1]);
            pNewBlobList->AddBlob(pNewBlob);

            for(i = 0; i < EBD_FRAME_NUM; ++i)
            {   /* Remove blob from each list: */
                m_pBlobLists[i]->DelBlob(pBL_BEST[i]);
            }   /* Remove blob from each list. */

            result = 1;
        }   /* Write new blob to output and delete from blob list. */
    }   /* Analyze blob list to find best blob trajectory. */

    return result;
}   /* cvDetectNewBlob */
JNIEXPORT jbooleanArray JNICALL Java_org_siprop_opencv_OpenCV_faceDetect(JNIEnv* env,
                                                                         jobject thiz,
                                                                         jintArray photo_data1,
                                                                         jintArray photo_data2,
                                                                         jint width,
                                                                         jint height) {
	LOGV("Load desp.");

	int i, x, y;
	int* pixels;
	IplImage *frameImage;

	IplImage *backgroundImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *grayImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *differenceImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );

	IplImage *hsvImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 3 );
	IplImage *hueImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *saturationImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *valueImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *thresholdImage1 = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *thresholdImage2 = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *thresholdImage3 = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *faceImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );

	CvMoments moment;
	double m_00;
	double m_10;
	double m_01;
	int gravityX;
	int gravityY;

	jbooleanArray res_array;
	int imageSize;

	// Load the background image
	pixels = env->GetIntArrayElements(photo_data1, 0);
	frameImage = loadPixels(pixels, width, height);
	if(frameImage == 0) {
		LOGV("Error loadPixels.");
		return 0;
	}
	cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );

	// Load the current frame and difference it against the background
	pixels = env->GetIntArrayElements(photo_data2, 0);
	frameImage = loadPixels(pixels, width, height);
	if(frameImage == 0) {
		LOGV("Error loadPixels.");
		return 0;
	}
	cvCvtColor( frameImage, grayImage, CV_BGR2GRAY );
	cvAbsDiff( grayImage, backgroundImage, differenceImage );

	cvCvtColor( frameImage, hsvImage, CV_BGR2HSV );
	LOGV("Load cvCvtColor.");
	cvSplit( hsvImage, hueImage, saturationImage, valueImage, 0 );
	LOGV("Load cvSplit.");

	// Band-pass the hue channel, then mask it with the frame difference
	cvThreshold( hueImage, thresholdImage1, THRESH_BOTTOM, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
	cvThreshold( hueImage, thresholdImage2, THRESH_TOP, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY_INV );
	cvAnd( thresholdImage1, thresholdImage2, thresholdImage3, 0 );
	LOGV("Load cvAnd.");
	cvAnd( differenceImage, thresholdImage3, faceImage, 0 );

	// Gravity center of the masked region, from its spatial moments
	cvMoments( faceImage, &moment, 0 );
	m_00 = cvGetSpatialMoment( &moment, 0, 0 );
	m_10 = cvGetSpatialMoment( &moment, 1, 0 );
	m_01 = cvGetSpatialMoment( &moment, 0, 1 );
	gravityX = m_10 / m_00;
	gravityY = m_01 / m_00;
	LOGV("Load cvMoments.");

	cvCircle( frameImage, cvPoint( gravityX, gravityY ), CIRCLE_RADIUS,
	          CV_RGB( 255, 0, 0 ), LINE_THICKNESS, LINE_TYPE, 0 );

	CvMat stub, *mat_image;
	int channels, ipl_depth;
	mat_image = cvGetMat( frameImage, &stub );
	channels = CV_MAT_CN( mat_image->type );
	ipl_depth = cvCvToIplDepth(mat_image->type);

	WLNonFileByteStream* m_strm = new WLNonFileByteStream();
	loadImageBytes(mat_image->data.ptr, mat_image->step, mat_image->width,
	               mat_image->height, ipl_depth, channels, m_strm);
	LOGV("Load loadImageBytes.");

	imageSize = m_strm->GetSize();
	res_array = env->NewBooleanArray(imageSize);
	LOGV("Load NewByteArray.");
	if (res_array == 0) {
		return 0;
	}
	env->SetBooleanArrayRegion(res_array, 0, imageSize, (jboolean*)m_strm->GetByte());
	LOGV("Load SetBooleanArrayRegion.");

	cvReleaseImage( &backgroundImage );
	cvReleaseImage( &grayImage );
	cvReleaseImage( &differenceImage );
	cvReleaseImage( &hsvImage );
	cvReleaseImage( &hueImage );
	cvReleaseImage( &saturationImage );
	cvReleaseImage( &valueImage );
	cvReleaseImage( &thresholdImage1 );
	cvReleaseImage( &thresholdImage2 );
	cvReleaseImage( &thresholdImage3 );
	cvReleaseImage( &faceImage );
	cvReleaseImage( &frameImage );
	m_strm->Close();
	SAFE_DELETE(m_strm);

	return res_array;
}
int main(int argc, char* argv[]) { CvMemStorage *contStorage = cvCreateMemStorage(0); CvSeq *contours; CvTreeNodeIterator polyIterator; int found = 0; int i; CvPoint poly_point; int fps=30; // ポリライン近似 CvMemStorage *polyStorage = cvCreateMemStorage(0); CvSeq *polys, *poly; // OpenCV variables CvFont font; printf("start!\n"); //pwm initialize if(gpioInitialise() < 0) return -1; //pigpio CW/CCW pin setup //X:18, Y1:14, Y2:15 gpioSetMode(18, PI_OUTPUT); gpioSetMode(14, PI_OUTPUT); gpioSetMode(15, PI_OUTPUT); //pigpio pulse setup //X:25, Y1:23, Y2:24 gpioSetMode(25, PI_OUTPUT); gpioSetMode(23, PI_OUTPUT); gpioSetMode(24, PI_OUTPUT); //limit-switch setup gpioSetMode(5, PI_INPUT); gpioWrite(5, 0); gpioSetMode(6, PI_INPUT); gpioWrite(6, 0); gpioSetMode(7, PI_INPUT); gpioWrite(7, 0); gpioSetMode(8, PI_INPUT); gpioWrite(8, 0); gpioSetMode(13, PI_INPUT); gpioSetMode(19, PI_INPUT); gpioSetMode(26, PI_INPUT); gpioSetMode(21, PI_INPUT); CvCapture* capture_robot_side = cvCaptureFromCAM(0); CvCapture* capture_human_side = cvCaptureFromCAM(1); if(capture_robot_side == NULL){ std::cout << "Robot Side Camera Capture FAILED" << std::endl; return -1; } if(capture_human_side ==NULL){ std::cout << "Human Side Camera Capture FAILED" << std::endl; return -1; } // size設定 cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH); cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT); cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH); cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT); //fps設定 cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FPS,fps); cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FPS,fps); // 画像の表示用ウィンドウ生成 //cvNamedWindow("Previous Image", CV_WINDOW_AUTOSIZE); // cvNamedWindow("Now Image", CV_WINDOW_AUTOSIZE); // cvNamedWindow("pack", CV_WINDOW_AUTOSIZE); // cvNamedWindow("mallet", CV_WINDOW_AUTOSIZE); // cvNamedWindow ("Poly", CV_WINDOW_AUTOSIZE); //Create trackbar to change brightness int iSliderValue1 = 50; cvCreateTrackbar("Brightness", "Now Image", &iSliderValue1, 100); //Create trackbar to change contrast int iSliderValue2 = 50; cvCreateTrackbar("Contrast", "Now Image", &iSliderValue2, 100); //pack threthold 0, 50, 120, 220, 100, 220 int iSliderValuePack1 = 54; //80; cvCreateTrackbar("minH", "pack", &iSliderValuePack1, 255); int iSliderValuePack2 = 84;//106; cvCreateTrackbar("maxH", "pack", &iSliderValuePack2, 255); int iSliderValuePack3 = 100;//219; cvCreateTrackbar("minS", "pack", &iSliderValuePack3, 255); int iSliderValuePack4 = 255;//175; cvCreateTrackbar("maxS", "pack", &iSliderValuePack4, 255); int iSliderValuePack5 = 0;//29; cvCreateTrackbar("minV", "pack", &iSliderValuePack5, 255); int iSliderValuePack6 = 255;//203; cvCreateTrackbar("maxV", "pack", &iSliderValuePack6, 255); //mallet threthold 0, 255, 100, 255, 140, 200 int iSliderValuemallet1 = 106; cvCreateTrackbar("minH", "mallet", &iSliderValuemallet1, 255); int iSliderValuemallet2 = 135; cvCreateTrackbar("maxH", "mallet", &iSliderValuemallet2, 255); int iSliderValuemallet3 = 218;//140 cvCreateTrackbar("minS", "mallet", &iSliderValuemallet3, 255); int iSliderValuemallet4 = 255; cvCreateTrackbar("maxS", "mallet", &iSliderValuemallet4, 255); int iSliderValuemallet5 = 0; cvCreateTrackbar("minV", "mallet", &iSliderValuemallet5, 255); int iSliderValuemallet6 = 105; cvCreateTrackbar("maxV", "mallet", &iSliderValuemallet6, 255); // 画像ファイルポインタの宣言 IplImage* img_robot_side = cvQueryFrame(capture_robot_side); IplImage* 
img_human_side = cvQueryFrame(capture_human_side); IplImage* img_all_round = cvCreateImage(cvSize(CAM_PIX_WIDTH, CAM_PIX_2HEIGHT), IPL_DEPTH_8U, 3); IplImage* tracking_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); IplImage* img_all_round2 = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); IplImage* show_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); cv::Mat mat_frame1; cv::Mat mat_frame2; cv::Mat dst_img_v; cv::Mat dst_bright_cont; int iBrightness = iSliderValue1 - 50; double dContrast = iSliderValue2 / 50.0; IplImage* dst_img_frame = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); IplImage* grayscale_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 1); IplImage* poly_tmp = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 1); IplImage* poly_dst = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 3); IplImage* poly_gray = cvCreateImage( cvGetSize(img_all_round),IPL_DEPTH_8U,1); int rotate_times = 0; //IplImage* -> Mat mat_frame1 = cv::cvarrToMat(img_robot_side); mat_frame2 = cv::cvarrToMat(img_human_side); //上下左右を反転。本番環境では、mat_frame1を反転させる cv::flip(mat_frame1, mat_frame1, 0); //水平軸で反転(垂直反転) cv::flip(mat_frame1, mat_frame1, 1); //垂直軸で反転(水平反転) vconcat(mat_frame2, mat_frame1, dst_img_v); dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //1枚にした画像をコンバート //画像の膨張と縮小 // cv::Mat close_img; // cv::Mat element(3,3,CV_8U, cv::Scalar::all(255)); // cv::morphologyEx(dst_img_v, close_img, cv::MORPH_CLOSE, element, cv::Point(-1,-1), 3); // cv::imshow("morphologyEx", dst_img_v); // dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //1枚にした画像をコンバート //明るさ調整した結果を変換(Mat->IplImage*)して渡す。その後解放。 *img_all_round = dst_bright_cont; cv_ColorExtraction(img_all_round, dst_img_frame, CV_BGR2HSV, 0, 54, 77, 255, 0, 255); cvCvtColor(dst_img_frame, grayscale_img, CV_BGR2GRAY); cv_Labelling(grayscale_img, tracking_img); cvCvtColor(tracking_img, poly_gray, CV_BGR2GRAY); cvCopy( poly_gray, poly_tmp); cvCvtColor( poly_gray, poly_dst, CV_GRAY2BGR); //画像の膨張と縮小 //cvMorphologyEx(tracking_img, tracking_img,) // 輪郭抽出 found = cvFindContours( poly_tmp, contStorage, &contours, sizeof( CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); // ポリライン近似 polys = cvApproxPoly( contours, sizeof( CvContour), polyStorage, CV_POLY_APPROX_DP, 8, 10); cvInitTreeNodeIterator( &polyIterator, ( void*)polys, 10); poly = (CvSeq *)cvNextTreeNode( &polyIterator); printf("sort before by X\n"); for( i=0; i<poly->total; i++) { poly_point = *( CvPoint*)cvGetSeqElem( poly, i); // cvCircle( poly_dst, poly_point, 1, CV_RGB(255, 0 , 255), -1); // cvCircle( poly_dst, poly_point, 8, CV_RGB(255, 0 , 255)); std::cout << "x:" << poly_point.x << ", y:" << poly_point.y << std::endl; } printf("Poly FindTotal:%d\n",poly->total); //枠の座標決定 //左上 の 壁サイド側 upper_left_f //左上 の ゴール寄り upper_left_g //右上 の 壁サイド側 upper_right_f //右上 の ゴール寄り upper_right_g //左下 の 壁サイド側 lower_left_f //左下 の ゴール寄り lower_left_g //右下 の 壁サイド側 lower_right_f //右下 の ゴール寄り lower_right_g CvPoint upper_left_f, upper_left_g, upper_right_f, upper_right_g, lower_left_f, lower_left_g, lower_right_f, lower_right_g, robot_goal_left, robot_goal_right; CvPoint frame_points[8]; // if(poly->total == 8){ // for( i=0; i<8; i++){ // poly_point = *( CvPoint*)cvGetSeqElem( poly, i); // frame_points[i] = poly_point; // } // qsort(frame_points, 8, sizeof(CvPoint), compare_cvpoint); // printf("sort after by X\n"); // for( i=0; i<8; i++){ // std::cout << "x:" << frame_points[i].x << ", y:" << frame_points[i].y << std::endl; 
// } // if(frame_points[0].y < frame_points[1].y){ // upper_left_f = frame_points[0]; // lower_left_f = frame_points[1]; // } // else{ // upper_left_f = frame_points[1]; // lower_left_f = frame_points[0]; // } // if(frame_points[2].y < frame_points[3].y){ // upper_left_g = frame_points[2]; // lower_left_g = frame_points[3]; // } // else{ // upper_left_g = frame_points[3]; // lower_left_g = frame_points[2]; // } // if(frame_points[4].y < frame_points[5].y){ // upper_right_g = frame_points[4]; // lower_right_g = frame_points[5]; // } // else{ // upper_right_g = frame_points[5]; // lower_right_g = frame_points[4]; // } // if(frame_points[6].y < frame_points[7].y){ // upper_right_f = frame_points[6]; // lower_right_f = frame_points[7]; // } // else{ // upper_right_f = frame_points[7]; // lower_right_f = frame_points[6]; // } // } // else{ printf("Frame is not 8 Point\n"); upper_left_f = cvPoint(26, 29); upper_right_f = cvPoint(136, 29); lower_left_f = cvPoint(26, 220); lower_right_f = cvPoint(136, 220); upper_left_g = cvPoint(38, 22); upper_right_g = cvPoint(125, 22); lower_left_g = cvPoint(38, 226); lower_right_g = cvPoint(125, 226); robot_goal_left = cvPoint(60, 226); robot_goal_right = cvPoint(93, 226); // cvCopy(img_all_round, show_img); // cvLine(show_img, upper_left_f, upper_right_f, CV_RGB( 255, 255, 0 )); // cvLine(show_img, lower_left_f, lower_right_f, CV_RGB( 255, 255, 0 )); // cvLine(show_img, upper_right_f, lower_right_f, CV_RGB( 255, 255, 0 )); // cvLine(show_img, upper_left_f, lower_left_f, CV_RGB( 255, 255, 0 )); // // cvLine(show_img, upper_left_g, upper_right_g, CV_RGB( 0, 255, 0 )); // cvLine(show_img, lower_left_g, lower_right_g, CV_RGB( 0, 255, 0 )); // cvLine(show_img, upper_right_g, lower_right_g, CV_RGB( 0, 255, 0 )); // cvLine(show_img, upper_left_g, lower_left_g, CV_RGB( 0, 255, 0 )); //while(1){ //cvShowImage("Now Image", show_img); //cvShowImage ("Poly", poly_dst); //if(cv::waitKey(1) >= 0) { //break; //} //} //return -1; // } printf("upper_left_fX:%d, Y:%d\n",upper_left_f.x, upper_left_f.y); printf("upper_left_gX:%d, Y:%d\n",upper_left_g.x, upper_left_g.y); printf("upper_right_fX:%d,Y:%d\n", upper_right_f.x, upper_right_f.y); printf("upper_right_gX:%d, Y:%d\n" , upper_right_g.x, upper_right_g.y); printf("lower_left_fX:%d, Y:%d\n", lower_left_f.x, lower_left_f.y); printf("lower_left_gX:%d, Y:%d\n", lower_left_g.x, lower_left_g.y); printf("lower_right_fX:%d, Y:%d\n", lower_right_f.x, lower_right_f.y); printf("lower_right_gX:%d, Y:%d\n", lower_right_g.x, lower_right_g.y); printf("robot_goal_left:%d, Y:%d\n", robot_goal_left.x, robot_goal_left.y); printf("robot_goal_right:%d, Y:%d\n", robot_goal_right.x, robot_goal_right.y); cvReleaseImage(&dst_img_frame); cvReleaseImage(&grayscale_img); cvReleaseImage(&poly_tmp); cvReleaseImage(&poly_gray); cvReleaseMemStorage(&contStorage); cvReleaseMemStorage(&polyStorage); //return 1; // Init font cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1); bool is_pushed_decision_button = 1;//もう一方のラズパイ信号にする while(1){ //決定ボタンが押されたらスタート if(gpioRead(8)==0 && is_pushed_decision_button==1){ cvCopy(img_all_round, img_all_round2); cvCopy(img_all_round, show_img); img_robot_side = cvQueryFrame(capture_robot_side); img_human_side = cvQueryFrame(capture_human_side); //IplImage* -> Mat mat_frame1 = cv::cvarrToMat(img_robot_side); mat_frame2 = cv::cvarrToMat(img_human_side); //上下左右を反転。本番環境では、mat_frame1を反転させる cv::flip(mat_frame1, mat_frame1, 0); //水平軸で反転(垂直反転) cv::flip(mat_frame1, mat_frame1, 1); //垂直軸で反転(水平反転) vconcat(mat_frame2, 
mat_frame1, dst_img_v); iBrightness = iSliderValue1 - 50; dContrast = iSliderValue2 / 50.0; dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //1枚にした画像をコンバート //明るさ調整した結果を変換(Mat->IplImage*)して渡す。その後解放。 *img_all_round = dst_bright_cont; mat_frame1.release(); mat_frame2.release(); dst_img_v.release(); IplImage* dst_img_mallet = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); IplImage* dst_img_pack = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3); IplImage* dst_img2_mallet = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3); IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3); cv_ColorExtraction(img_all_round, dst_img_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6); cv_ColorExtraction(img_all_round, dst_img_mallet, CV_BGR2HSV, iSliderValuemallet1, iSliderValuemallet2, iSliderValuemallet3, iSliderValuemallet4, iSliderValuemallet5, iSliderValuemallet6); cv_ColorExtraction(img_all_round2, dst_img2_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6); //CvMoments moment_mallet; CvMoments moment_pack; CvMoments moment_mallet; CvMoments moment2_pack; //cvSetImageCOI(dst_img_mallet, 1); cvSetImageCOI(dst_img_pack, 1); cvSetImageCOI(dst_img_mallet, 1); cvSetImageCOI(dst_img2_pack, 1); //cvMoments(dst_img_mallet, &moment_mallet, 0); cvMoments(dst_img_pack, &moment_pack, 0); cvMoments(dst_img_mallet, &moment_mallet, 0); cvMoments(dst_img2_pack, &moment2_pack, 0); //座標計算 double m00_before = cvGetSpatialMoment(&moment2_pack, 0, 0); double m10_before = cvGetSpatialMoment(&moment2_pack, 1, 0); double m01_before = cvGetSpatialMoment(&moment2_pack, 0, 1); double m00_after = cvGetSpatialMoment(&moment_pack, 0, 0); double m10_after = cvGetSpatialMoment(&moment_pack, 1, 0); double m01_after = cvGetSpatialMoment(&moment_pack, 0, 1); double gX_before = m10_before/m00_before; double gY_before = m01_before/m00_before; double gX_after = m10_after/m00_after; double gY_after = m01_after/m00_after; double m00_mallet = cvGetSpatialMoment(&moment_mallet, 0, 0); double m10_mallet = cvGetSpatialMoment(&moment_mallet, 1, 0); double m01_mallet = cvGetSpatialMoment(&moment_mallet, 0, 1); double gX_now_mallet = m10_mallet/m00_mallet; double gY_now_mallet = m01_mallet/m00_mallet; int target_direction = -1; //目標とする向き 時計回り=1、 反時計回り=0 //円の大きさは全体の1/10で描画 // cvCircle(show_img, cvPoint(gX_before, gY_before), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0); // cvCircle(show_img, cvPoint(gX_now_mallet, gY_now_mallet), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0); // cvLine(show_img, cvPoint(gX_before, gY_before), cvPoint(gX_after, gY_after), cvScalar(0,255,0), 2); // cvLine(show_img, robot_goal_left, robot_goal_right, cvScalar(0,255,255), 2); printf("gX_after: %f\n",gX_after); printf("gY_after: %f\n",gY_after); printf("gX_before: %f\n",gX_before); printf("gY_before: %f\n",gY_before); printf("gX_now_mallet: %f\n",gX_now_mallet); printf("gY_now_mallet: %f\n",gY_now_mallet); int target_destanceY = CAM_PIX_2HEIGHT - 30; //Y座標の距離を一定にしている。ディフェンスライン。 //パックの移動は直線のため、一次関数の計算を使って、その後の軌跡を予測する。 double a_inclination; double b_intercept; int closest_frequency; int target_coordinateX; int origin_coordinateY; int target_coordinateY; double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4; int left_frame = (upper_left_f.x + lower_left_f.x)/2; int right_frame = (upper_right_f.x + 
lower_right_f.x)/2;
        if(robot_goal_right.x < gX_now_mallet){
            gpioPWM(25, 128);
            closest_frequency = gpioSetPWMfrequency(25, 1500);
            target_direction = 0; // counterclockwise
        }
        else if(gX_now_mallet < robot_goal_left.x){
            gpioPWM(25, 128);
            closest_frequency = gpioSetPWMfrequency(25, 1500);
            target_direction = 1; // clockwise
        }
        else{
            // PWM output for rotation; allow a margin for table vibration
            if(fabs(gX_after - gX_before) <= 1 && fabs(gY_after - gY_before) <= 1){ // pause while the puck is not moving
                gpioPWM(25, 0);
                closest_frequency = gpioSetPWMfrequency(25, 0);
                a_inclination = 0;
                b_intercept = 0;
            }
            else{
                a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
                b_intercept = gY_after - a_inclination * gX_after;
                // compute the target X coordinate from the linear trajectory
                if(a_inclination){
                    target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
                }
                else{
                    target_coordinateX = center_line;
                }
                origin_coordinateY = a_inclination * left_frame + b_intercept;
                int rebound_max = 5;
                int rebound_num = 0;
                while(target_coordinateX < left_frame || right_frame < target_coordinateX){
                    if(target_coordinateX < left_frame){
                        // trajectory after a rebound off the left frame
                        target_coordinateX = 2 * left_frame - target_coordinateX;
                        b_intercept -= 2 * ((-a_inclination) * left_frame);
                        a_inclination = -a_inclination;
                        origin_coordinateY = a_inclination * left_frame + b_intercept;
                        if(target_coordinateX < right_frame){
                        }
                        else{
                            // Y coordinate where the path crosses from the left frame to the right frame
                            target_coordinateY = a_inclination * right_frame + b_intercept;
                        }
                    }
                    else if(right_frame < target_coordinateX){
                        // trajectory after a rebound off the right frame
                        target_coordinateX = 2 * right_frame - target_coordinateX;
                        b_intercept += 2 * (a_inclination * right_frame);
                        a_inclination = -a_inclination;
                        //cvLine(show_img, cvPoint(right_frame, b_intercept), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
                        origin_coordinateY = a_inclination * right_frame + b_intercept;
                        if(left_frame < target_coordinateX){
                        }
                        else{
                            // Y coordinate where the path crosses from the right frame to the left frame
                            target_coordinateY = a_inclination * left_frame + b_intercept;
                        }
                    }
                    rebound_num++;
                    if(rebound_max < rebound_num){
                        // too many rebounds: fall back to the center of the table
                        target_coordinateX = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
                        break;
                    }
                }
                if(target_coordinateX < center_line && center_line < gX_now_mallet){
                    target_direction = 0;
                    gpioPWM(25, 128);
                    closest_frequency = gpioSetPWMfrequency(25, 1000);
                }
                else if(center_line < target_coordinateX && gX_now_mallet < center_line){
                    target_direction = 1;
                    gpioPWM(25, 128);
                    closest_frequency = gpioSetPWMfrequency(25, 1000);
                }
                else{
                    gpioPWM(25, 0);
                    closest_frequency = gpioSetPWMfrequency(25, 0);
                }
            }
            printf("a_inclination: %f\n",a_inclination);
            printf("b_intercept: %f\n",b_intercept);
        }
        if(target_direction != -1){
            gpioWrite(18, target_direction);
        }
        // PWM output for rotation; allow a margin for table vibration
        /*if(abs(gX_after - gX_before) <= 1){ // pause while the puck is not moving
            gpioPWM(25, 0);
            closest_frequency = gpioSetPWMfrequency(25, 0);
            a_inclination = 0;
            b_intercept=0;
        }
        else if(gY_after-1 < gY_before ){
            // when the puck is moving away, return to the center of the table
            a_inclination = 0;
            b_intercept=0;
            // the target is the center, computed from the four robot-side corner points
            double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
            if(center_line + 3 < gX_now_mallet){ // +1 margin
                gpioPWM(25, 128);
                closest_frequency = gpioSetPWMfrequency(25, 1500);
                target_direction = 0; // counterclockwise
            }
            else if(gX_now_mallet < center_line-3){ // -1 margin
                gpioPWM(25, 128);
                closest_frequency = gpioSetPWMfrequency(25, 1500);
                target_direction = 1; // clockwise
            }
            else{
                gpioPWM(25, 0);
                closest_frequency = gpioSetPWMfrequency(25, 0);
            }
        }
        else{
            gpioPWM(25, 128);
            closest_frequency = gpioSetPWMfrequency(25, 1500);
            a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
            b_intercept = gY_after - a_inclination * gX_after;
            // compute the target X coordinate from the linear trajectory
            if(a_inclination){
                target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
            }
            else{
                target_coordinateX = 0;
            }
        }
        printf("a_inclination: %f\n",a_inclination);
        printf("b_intercept: %f\n",b_intercept);
        int left_frame = (upper_left_f.x + lower_left_f.x)/2;
        int right_frame = (upper_right_f.x + lower_right_f.x)/2;
        origin_coordinateY = a_inclination * left_frame + b_intercept;
        if(target_coordinateX < left_frame){
            cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint(left_frame, origin_coordinateY), cvScalar(0,255,255), 2);
        }
        else if(right_frame < target_coordinateX){
            cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint(right_frame, origin_coordinateY), cvScalar(0,255,255), 2);
        }
        else{
            cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
        }
        int rebound_max = 5;
        int rebound_num = 0;
        while(target_coordinateX < left_frame || right_frame < target_coordinateX){
            if(target_coordinateX < left_frame){
                // trajectory after a rebound off the left frame
                target_coordinateX = 2 * left_frame - target_coordinateX;
                b_intercept -= 2 * ((-a_inclination) * left_frame);
                a_inclination = -a_inclination;
                origin_coordinateY = a_inclination * left_frame + b_intercept;
                if(target_coordinateX < right_frame){
                    cvLine(show_img, cvPoint(left_frame, origin_coordinateY), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
                }
                else{
                    // Y coordinate where the path crosses from the left frame to the right frame
                    target_coordinateY = a_inclination * right_frame + b_intercept;
                    cvLine(show_img, cvPoint(left_frame, origin_coordinateY), cvPoint(right_frame, target_coordinateY), cvScalar(0,255,255), 2);
                }
            }
            else if(right_frame < target_coordinateX){
                // trajectory after a rebound off the right frame
                target_coordinateX = 2 * right_frame - target_coordinateX;
                b_intercept += 2 * (a_inclination * right_frame);
                a_inclination = -a_inclination;
                //cvLine(show_img, cvPoint(right_frame, b_intercept), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
                origin_coordinateY = a_inclination * right_frame + b_intercept;
                if(left_frame < target_coordinateX){
                    cvLine(show_img, cvPoint(right_frame, origin_coordinateY), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
                }
                else{
                    // Y coordinate where the path crosses from the right frame to the left frame
                    target_coordinateY = a_inclination * left_frame + b_intercept;
                    cvLine(show_img, cvPoint(right_frame, origin_coordinateY), cvPoint(left_frame, target_coordinateY), cvScalar(0,255,255), 2);
                }
            }
            rebound_num++;
            if(rebound_max < rebound_num){
                // too many rebounds: fall back to the center of the table
                target_coordinateX = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
                break;
            }
        }
        printf("target_coordinateX: %d\n",target_coordinateX);
        // draw the defense line
        cvLine(show_img, cvPoint(CAM_PIX_WIDTH, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
        // draw the mallet's movement
        cvLine(show_img, cvPoint((int)gX_now_mallet, (int)gY_now_mallet), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
        //cvPutText (show_img, to_c_char((int)gX_now_mallet), cvPoint(460,30), &font, cvScalar(220,50,50));
        //cvPutText (show_img, to_c_char((int)target_coordinateX), cvPoint(560,30), &font, cvScalar(50,220,220));
        int amount_movement = target_coordinateX - gX_now_mallet;
        // react to a tripped limit switch and set the rotation direction
        // if(gpioRead(6) == 1){ // X-axis right
        //     gpioPWM(25, 128);
        //     closest_frequency = gpioSetPWMfrequency(25, 1500);
        //     target_direction = 0; // counterclockwise
        //     printf("X-axis right limit! counterclockwise\n");
        // }
        // else if(gpioRead(26) == 1){ // X-axis left
            gpioPWM(25, 128);
            closest_frequency = gpioSetPWMfrequency(25, 1500);
            target_direction = 1; // clockwise
            printf("X-axis left limit! clockwise\n");
        }
        else if(gpioRead(5) == 1){ // Y-axis upper right
            gpioPWM(23, 128);
            gpioSetPWMfrequency(23, 1500);
            gpioWrite(14, 0);
            printf("Y-axis upper-right limit! clockwise\n");
        }
        else if(gpioRead(13) == 1){ // Y-axis lower right
            gpioPWM(23, 128);
            gpioSetPWMfrequency(23, 1500);
            gpioWrite(14, 1);
            printf("Y-axis lower-right limit! counterclockwise\n");
        }
        else if(gpioRead(19) == 1){ // Y-axis lower left
            gpioPWM(24, 128);
            gpioSetPWMfrequency(24, 1500);
            gpioWrite(15, 0);
            printf("Y-axis lower-left limit! clockwise\n");
        }
        else if(gpioRead(21) == 1){ // Y-axis upper left
            gpioPWM(24, 0);
            gpioSetPWMfrequency(24, 1500);
            gpioWrite(15, 1);
            printf("Y-axis upper-left limit! counterclockwise\n");
        }
        else{
            // keep the Y axis fixed
            gpioSetPWMfrequency(23, 0);
            gpioSetPWMfrequency(24, 0);
            if(amount_movement > 0){
                target_direction = 1; // clockwise
            }
            else if(amount_movement < 0){
                target_direction = 0; // counterclockwise
            }
        }
        if(target_direction != -1){
            gpioWrite(18, target_direction);
        }
        else{
            gpioPWM(24, 0);
            gpioSetPWMfrequency(24, 0);
        }
        printf("setting_frequency: %d\n", closest_frequency);*/
        // display the images in the specified windows
        //cvShowImage("Previous Image", img_all_round2);
        // cvShowImage("Now Image", show_img);
        // cvShowImage("pack", dst_img_pack);
        // cvShowImage("mallet", dst_img_mallet);
        // cvShowImage ("Poly", poly_dst);
        cvReleaseImage (&dst_img_mallet);
        cvReleaseImage (&dst_img_pack);
        cvReleaseImage (&dst_img2_mallet);
        cvReleaseImage (&dst_img2_pack);
        if(cv::waitKey(1) >= 0) {
            break;
        }
    }
    else{ // when a reset signal arrives
        is_pushed_decision_button = 0;
    }
}
gpioTerminate();
cvDestroyAllWindows();
//Clean up used CvCapture*
cvReleaseCapture(&capture_robot_side);
cvReleaseCapture(&capture_human_side);
//Clean up used images
cvReleaseImage(&poly_dst);
cvReleaseImage(&tracking_img);
cvReleaseImage(&img_all_round);
cvReleaseImage(&img_human_side);
cvReleaseImage(&img_all_round2);
cvReleaseImage(&show_img);
cvReleaseImage(&img_robot_side);
return 0;
}
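/* The rebound loop above mirrors the predicted crossing point across
 * whichever side frame it overshoots until the point lands on the table,
 * then falls back to the table centre after rebound_max reflections.
 * A minimal self-contained sketch of just that geometry (names are
 * hypothetical; left/right stand for the frame X coordinates computed
 * above, and only the final X is tracked, since the intercept updates
 * in the original loop feed the drawing calls, not the target): */
#include <stdio.h>

static double predict_target_x(double a, double b, double defense_y,
                               double left, double right,
                               double center, int max_rebounds)
{
    if (a == 0.0)                      /* horizontal path never reaches the line */
        return center;
    double x = (defense_y - b) / a;    /* unreflected crossing point */
    int n;
    for (n = 0; (x < left || x > right) && n < max_rebounds; ++n) {
        if (x < left)  x = 2.0 * left  - x;   /* mirror at the left wall  */
        else           x = 2.0 * right - x;   /* mirror at the right wall */
    }
    return (x < left || x > right) ? center : x;
}

int main(void)
{
    /* Puck moving with slope 2 toward a defense line at y=450 on a table
     * whose walls sit at x=40 and x=600: one rebound off the right wall. */
    printf("target x = %f\n",
           predict_target_x(2.0, -1300.0, 450.0, 40.0, 600.0, 320.0, 5));
    return 0;
}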
int colour_tracker()
{
    // Initialize capturing live feed from the camera
    CvCapture* capture = 0;
    capture = cvCaptureFromCAM(0);
    // Couldn't get a device? Throw an error and quit
    if(!capture)
    {
        printf("Could not initialize capturing...\n");
        return -1;
    }
    // The two windows we'll be using
    cvNamedWindow("video");
    cvNamedWindow("thresh");
    // This image holds the "scribble" data...
    // the tracked positions of the ball
    IplImage* imgScribble = NULL;
    // An infinite loop
    while(true)
    {
        // Will hold a frame captured from the camera
        IplImage* frame = 0;
        frame = cvQueryFrame(capture);
        // If we couldn't grab a frame... quit
        if(!frame)
            break;
        // If this is the first frame, we need to initialize it
        if(imgScribble == NULL)
        {
            imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
        }
        // Holds the yellow thresholded image (yellow = white, rest = black)
        IplImage* imgYellowThresh = GetThresholdedImage(frame);
        // Calculate the moments to estimate the position of the ball
        CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
        cvMoments(imgYellowThresh, moments, 1);
        // The actual moment values
        double moment10 = cvGetSpatialMoment(moments, 1, 0);
        double moment01 = cvGetSpatialMoment(moments, 0, 1);
        double area = cvGetCentralMoment(moments, 0, 0);
        // Holding the last and current ball positions
        static int posX = 0;
        static int posY = 0;
        int lastX = posX;
        int lastY = posY;
        posX = moment10/area;
        posY = moment01/area;
        // Print it out for debugging purposes
        printf("position (%d,%d)\n", posX, posY);
        // We want to draw a line only if it's a valid position
        if(lastX>0 && lastY>0 && posX>0 && posY>0)
        {
            // Draw a yellow line from the previous point to the current point
            cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
        }
        // Add the scribbling image and the frame... and we get a combination of the two
        cvAdd(frame, imgScribble, frame);
        cvShowImage("thresh", imgYellowThresh);
        cvShowImage("video", frame);
        // Release the thresholded image and the moments before any early
        // exit... we need no memory leaks.. please
        cvReleaseImage(&imgYellowThresh);
        free(moments);
        // Wait for a keypress
        int c = cvWaitKey(10);
        if(c!=-1)
        {
            // If pressed, break out of the loop
            break;
        }
    }
    cvReleaseImage(&imgScribble);
    // We're done using the camera. Other applications can now use it
    cvReleaseCapture(&capture);
    return 0;
}
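/* Several snippets in this collection heap-allocate CvMoments with
 * malloc() and then release it with delete. The struct is small and
 * fixed-size, so a stack-allocated helper sidesteps the mismatch and
 * the allocation entirely. A sketch, assuming the legacy C headers;
 * the min_area guard is an addition, not in the original loops: */
#include <opencv/cv.h>

static int centroid_of(IplImage *binary, double min_area,
                       double *x, double *y)
{
    CvMoments m;                  /* stack allocation: nothing to free */
    cvMoments(binary, &m, 1);     /* 1 = treat any nonzero pixel as 1  */
    double area = cvGetSpatialMoment(&m, 0, 0);
    if (area <= min_area)         /* nothing (or only noise) segmented */
        return 0;
    *x = cvGetSpatialMoment(&m, 1, 0) / area;
    *y = cvGetSpatialMoment(&m, 0, 1) / area;
    return 1;
}
/* Usage inside a loop like the one above would be:
 *   double x, y;
 *   if (centroid_of(imgYellowThresh, 100.0, &x, &y)) { ... }  */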
static CvTestSeqElem* icvTestSeqReadElemOne(CvTestSeq_* pTS, CvFileStorage* fs, CvFileNode* node) { int noise_type = CV_NOISE_NONE;; CvTestSeqElem* pElem = NULL; const char* pVideoName = cvReadStringByName( fs, node,"Video", NULL); const char* pVideoObjName = cvReadStringByName( fs, node,"VideoObj", NULL); if(pVideoName) { /* Check to noise flag: */ if( cv_stricmp(pVideoName,"noise_gaussian") == 0 || cv_stricmp(pVideoName,"noise_normal") == 0) noise_type = CV_NOISE_GAUSSIAN; if( cv_stricmp(pVideoName,"noise_uniform") == 0) noise_type = CV_NOISE_UNIFORM; if( cv_stricmp(pVideoName,"noise_speckle") == 0) noise_type = CV_NOISE_SPECKLE; if( cv_stricmp(pVideoName,"noise_salt_and_pepper") == 0) noise_type = CV_NOISE_SALT_AND_PEPPER; } if((pVideoName || pVideoObjName ) && noise_type == CV_NOISE_NONE) { /* Read other elements: */ if(pVideoName) pElem = icvTestSeqReadElemAll(pTS, fs, pVideoName); if(pVideoObjName) { CvTestSeqElem* pE; pElem = icvTestSeqReadElemAll(pTS, fs, pVideoObjName); for(pE=pElem;pE;pE=pE->next) { pE->ObjID = pTS->ObjNum; pE->pObjName = pVideoObjName; } pTS->ObjNum++; } } /* Read other elements. */ else { /* Create new element: */ CvFileNode* pPosNode = cvGetFileNodeByName( fs, node,"Pos"); CvFileNode* pSizeNode = cvGetFileNodeByName( fs, node,"Size"); int AutoSize = (pSizeNode && CV_NODE_IS_STRING(pSizeNode->tag) && cv_stricmp("auto",cvReadString(pSizeNode,""))==0); int AutoPos = (pPosNode && CV_NODE_IS_STRING(pPosNode->tag) && cv_stricmp("auto",cvReadString(pPosNode,""))==0); const char* pFileName = cvReadStringByName( fs, node,"File", NULL); pElem = (CvTestSeqElem*)cvAlloc(sizeof(CvTestSeqElem)); memset(pElem,0,sizeof(CvTestSeqElem)); pElem->ObjID = -1; pElem->noise_type = noise_type; cvRandInit( &pElem->rnd_state, 1, 0, 0,CV_RAND_NORMAL); if(pFileName && pElem->noise_type == CV_NOISE_NONE) { /* If AVI or BMP: */ size_t l = strlen(pFileName); pElem->pFileName = pFileName; pElem->type = SRC_TYPE_IMAGE; if(cv_stricmp(".avi",pFileName+l-4) == 0)pElem->type = SRC_TYPE_AVI; if(pElem->type == SRC_TYPE_IMAGE) { //pElem->pImg = cvLoadImage(pFileName); if(pElem->pImg) { pElem->FrameNum = 1; if(pElem->pImgMask)cvReleaseImage(&(pElem->pImgMask)); pElem->pImgMask = cvCreateImage( cvSize(pElem->pImg->width,pElem->pImg->height), IPL_DEPTH_8U,1); icvTestSeqCreateMask(pElem->pImg,pElem->pImgMask,FG_BG_THRESHOLD); } } if(pElem->type == SRC_TYPE_AVI && pFileName) { //pElem->pAVI = cvCaptureFromFile(pFileName); if(pElem->pAVI) { IplImage* pImg = 0;//cvQueryFrame(pElem->pAVI); pElem->pImg = cvCloneImage(pImg); pElem->pImg->origin = 0; //cvSetCaptureProperty(pElem->pAVI,CV_CAP_PROP_POS_FRAMES,0); pElem->FrameBegin = 0; pElem->AVILen = pElem->FrameNum = 0;//(int)cvGetCaptureProperty(pElem->pAVI, CV_CAP_PROP_FRAME_COUNT); //cvReleaseCapture(&pElem->pAVI); pElem->pAVI = NULL; } else { printf("WARNING!!! Cannot open avi file %s\n",pFileName); } } } /* If AVI or BMP. 
*/ if(pPosNode) { /* Read positions: */ if(CV_NODE_IS_SEQ(pPosNode->tag)) { int num = pPosNode->data.seq->total; pElem->pPos = (CvPoint2D32f*)cvAlloc(sizeof(float)*num); cvReadRawData( fs, pPosNode, pElem->pPos, "f" ); pElem->PosNum = num/2; if(pElem->FrameNum == 0) pElem->FrameNum = pElem->PosNum; } } if(pSizeNode) { /* Read sizes: */ if(CV_NODE_IS_SEQ(pSizeNode->tag)) { int num = pSizeNode->data.seq->total; pElem->pSize = (CvPoint2D32f*)cvAlloc(sizeof(float)*num); cvReadRawData( fs, pSizeNode, pElem->pSize, "f" ); pElem->SizeNum = num/2; } } if(AutoPos || AutoSize) { /* Auto size and pos: */ int i; int num = (pElem->type == SRC_TYPE_AVI)?pElem->AVILen:1; if(AutoSize) { pElem->pSize = (CvPoint2D32f*)cvAlloc(sizeof(CvPoint2D32f)*num); pElem->SizeNum = num; } if(AutoPos) { pElem->pPos = (CvPoint2D32f*)cvAlloc(sizeof(CvPoint2D32f)*num); pElem->PosNum = num; } for(i=0; i<num; ++i) { IplImage* pFG = NULL; CvPoint2D32f* pPos = AutoPos?(pElem->pPos + i):NULL; CvPoint2D32f* pSize = AutoSize?(pElem->pSize + i):NULL; icvTestSeqQureyFrameElem(pElem,i); pFG = pElem->pImgMask; if(pPos) { pPos->x = 0.5f; pPos->y = 0.5f; } if(pSize) { pSize->x = 0; pSize->y = 0; } if(pFG) { double M00; CvMoments m; cvMoments( pElem->pImgMask, &m, 0 ); M00 = cvGetSpatialMoment( &m, 0, 0 ); if(M00 > 0 && pSize ) { double X = cvGetSpatialMoment( &m, 1, 0 )/M00; double Y = cvGetSpatialMoment( &m, 0, 1 )/M00; double XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X; double YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y; pSize->x = (float)(4*sqrt(XX))/(pElem->pImgMask->width-1); pSize->y = (float)(4*sqrt(YY))/(pElem->pImgMask->height-1); } if(M00 > 0 && pPos) { pPos->x = (float)(cvGetSpatialMoment( &m, 1, 0 )/(M00*(pElem->pImgMask->width-1))); pPos->y = (float)(cvGetSpatialMoment( &m, 0, 1 )/(M00*(pElem->pImgMask->height-1))); } if(pPos) { /* Another way to calculate y pos * using object median: */ int y0=0, y1=pFG->height-1; for(y0=0; y0<pFG->height; ++y0) { CvMat m; CvScalar s = cvSum(cvGetRow(pFG, &m, y0)); if(s.val[0] > 255*7) break; } for(y1=pFG->height-1; y1>0; --y1) { CvMat m; CvScalar s = cvSum(cvGetRow(pFG, &m, y1)); if(s.val[0] > 255*7) break; } pPos->y = (y0+y1)*0.5f/(pFG->height-1); } } /* pFG */ } /* Next frame. */ //if(pElem->pAVI) cvReleaseCapture(&pElem->pAVI); pElem->pAVI = NULL; } /* End auto position creation. */ } /* Create new element. 
*/ if(pElem) { /* Read transforms and: */ int FirstFrame, LastFrame; CvTestSeqElem* p=pElem; CvFileNode* pTransNode = NULL; CvFileNode* pS = NULL; int ShiftByPos = 0; int KeyFrames[1024]; CvSeq* pTransSeq = NULL; int KeyFrameNum = 0; pTransNode = cvGetFileNodeByName( fs, node,"Trans"); while( pTransNode && CV_NODE_IS_STRING(pTransNode->tag) && cv_stricmp("auto",cvReadString(pTransNode,""))!=0) { /* Trans is reference: */ pTransNode = cvGetFileNodeByName( fs, NULL,cvReadString(pTransNode,"")); } pS = cvGetFileNodeByName( fs, node,"Shift"); ShiftByPos = 0; pTransSeq = pTransNode?(CV_NODE_IS_SEQ(pTransNode->tag)?pTransNode->data.seq:NULL):NULL; KeyFrameNum = pTransSeq?pTransSeq->total:1; if( (pS && CV_NODE_IS_STRING(pS->tag) && cv_stricmp("auto",cvReadString(pS,""))==0) ||(pTransNode && CV_NODE_IS_STRING(pTransNode->tag) && cv_stricmp("auto",cvReadString(pTransNode,""))==0)) { ShiftByPos = 1; } FirstFrame = pElem->FrameBegin; LastFrame = pElem->FrameBegin+pElem->FrameNum-1; /* Calculate length of video and reallocate * transformation array: */ for(p=pElem; p; p=p->next) { int v; v = cvReadIntByName( fs, node, "BG", -1 ); if(v!=-1)p->BG = v; v = cvReadIntByName( fs, node, "Mask", -1 ); if(v!=-1)p->Mask = v; p->FrameBegin += cvReadIntByName( fs, node, "FrameBegin", 0 ); p->FrameNum = cvReadIntByName( fs, node, "FrameNum", p->FrameNum ); p->FrameNum = cvReadIntByName( fs, node, "Dur", p->FrameNum ); { int LastFrame = cvReadIntByName( fs, node, "LastFrame", p->FrameBegin+p->FrameNum-1 ); p->FrameNum = MIN(p->FrameNum,LastFrame - p->FrameBegin+1); } icvTestSeqAllocTrans(p); { /* New range estimation: */ int LF = p->FrameBegin+p->FrameNum-1; if(p==pElem || FirstFrame > p->FrameBegin)FirstFrame = p->FrameBegin; if(p==pElem || LastFrame < LF)LastFrame = LF; } /* New range estimation. */ } /* End allocate new transfrom array. */ if(ShiftByPos) { for(p=pElem;p;p=p->next) { /* Modify transformation to make autoshift: */ int i; int num = p->FrameNum; assert(num <= p->TransNum); p->TransNum = MAX(1,num); for(i=0; i<num; ++i) { CvTSTrans* pT = p->pTrans+i; //float t = (num>1)?((float)i/(num-1)):0.0f; float newx = p->pPos[i%p->PosNum].x; float newy = p->pPos[i%p->PosNum].y; pT->Shift.x = -newx*pT->Scale.x; pT->Shift.y = -newy*pT->Scale.y; if(p->pImg) { newx *= p->pImg->width-1; newy *= p->pImg->height-1; } pT->T[2] = -(pT->T[0]*newx+pT->T[1]*newy); pT->T[5] = -(pT->T[3]*newx+pT->T[4]*newy); } } /* Modify transformation old. */ } /* Next record. */ /* Initialize frame number array: */ KeyFrames[0] = FirstFrame; if(pTransSeq&&KeyFrameNum>1) { int i0,i1,i; for(i=0; i<KeyFrameNum; ++i) { CvFileNode* pTN = (CvFileNode*)cvGetSeqElem(pTransSeq,i); KeyFrames[i] = cvReadIntByName(fs,pTN,"frame",-1); } if(KeyFrames[0]<0)KeyFrames[0]=FirstFrame; if(KeyFrames[KeyFrameNum-1]<0)KeyFrames[KeyFrameNum-1]=LastFrame; for(i0=0, i1=1; i1<KeyFrameNum;) { int i; for(i1=i0+1; i1<KeyFrameNum && KeyFrames[i1]<0; i1++); assert(i1<KeyFrameNum); assert(i1>i0); for(i=i0+1; i<i1; ++i) { KeyFrames[i] = cvRound(KeyFrames[i0] + (float)(i-i0)*(float)(KeyFrames[i1] - KeyFrames[i0])/(float)(i1-i0)); } i0 = i1; i1++; } /* Next key run. */ } /* Initialize frame number array. */ if(pTransNode || pTransSeq) { /* More complex transform. 
*/ int param; CvFileNode* pTN = pTransSeq?(CvFileNode*)cvGetSeqElem(pTransSeq,0):pTransNode; for(p=pElem; p; p=p->next) { //int trans_num = p->TransNum; for(param=0; param_name[param]; ++param) { const char* name = param_name[param]; float defv = param_defval[param]; if(KeyFrameNum==1) { /* Only one transform record: */ int i; double val; CvFileNode* node = cvGetFileNodeByName( fs, pTN,name); if(node == NULL) continue; val = cvReadReal(node,defv); for(i=0; i<p->TransNum; ++i) { icvUpdateTrans( p->pTrans+i, param, val, p->pImg?(float)(p->pImg->width-1):1.0f, p->pImg?(float)(p->pImg->height-1):1.0f); } } /* Next record. */ else { /* Several transforms: */ int i0,i1; double v0; double v1; CvFileNode* pTN = (CvFileNode*)cvGetSeqElem(pTransSeq,0); v0 = cvReadRealByName(fs, pTN,name,defv); for(i1=1,i0=0; i1<KeyFrameNum; ++i1) { int f0,f1; int i; CvFileNode* pTN = (CvFileNode*)cvGetSeqElem(pTransSeq,i1); CvFileNode* pVN = cvGetFileNodeByName(fs,pTN,name); if(pVN)v1 = cvReadReal(pVN,defv); else if(pVN == NULL && i1 == KeyFrameNum-1) v1 = defv; else continue; f0 = KeyFrames[i0]; f1 = KeyFrames[i1]; if(i1==(KeyFrameNum-1)) f1++; for(i=f0; i<f1; ++i) { double val; double t = (float)(i-f0); int li = i - p->FrameBegin; if(li<0) continue; if(li>= p->TransNum) break; if(KeyFrames[i1]>KeyFrames[i0]) t /=(float)(KeyFrames[i1]-KeyFrames[i0]); val = t*(v1-v0)+v0; icvUpdateTrans( p->pTrans+li, param, val, p->pImg?(float)(p->pImg->width-1):1.0f, p->pImg?(float)(p->pImg->height-1):1.0f); } /* Next transform. */ i0 = i1; v0 = v1; } /* Next value run. */ } /* Several transforms. */ } /* Next parameter. */ } /* Next record. */ } /* More complex transform. */ } /* Read transfroms. */ return pElem; } /* icvTestSeqReadElemOne */
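/* The "auto" position/size branch above reduces a foreground mask to a
 * normalized position and size: the centroid from the first-order
 * moments, and a 4-sigma box from the second-order terms. The same
 * computation in isolation (hypothetical helper, same formulas as the
 * branch above, including its defaults for an empty mask): */
#include <opencv/cv.h>
#include <math.h>

static void mask_pos_size(IplImage *mask, CvPoint2D32f *pos, CvPoint2D32f *size)
{
    CvMoments m;
    double M00;
    pos->x = pos->y = 0.5f;       /* defaults when the mask is empty */
    size->x = size->y = 0.f;
    cvMoments(mask, &m, 0);
    M00 = cvGetSpatialMoment(&m, 0, 0);
    if (M00 <= 0) return;
    {
        double X  = cvGetSpatialMoment(&m, 1, 0) / M00;
        double Y  = cvGetSpatialMoment(&m, 0, 1) / M00;
        double XX = cvGetSpatialMoment(&m, 2, 0) / M00 - X * X;  /* var(x) */
        double YY = cvGetSpatialMoment(&m, 0, 2) / M00 - Y * Y;  /* var(y) */
        pos->x  = (float)(X / (mask->width  - 1));
        pos->y  = (float)(Y / (mask->height - 1));
        size->x = (float)(4 * sqrt(XX) / (mask->width  - 1));
        size->y = (float)(4 * sqrt(YY) / (mask->height - 1));
    }
}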
int main(int argc, char* argv[])
{
    int counter;
    wchar_t auxstr[20];
    printf("start!\n");
    printf("PUCK MINH: %d\n",minH);
    printf("PUCK MAXH: %d\n",maxH);
    printf("PUCK MINS: %d\n",minS);
    printf("PUCK MAXS: %d\n",maxS);
    printf("PUCK MINV: %d\n",minV);
    printf("PUCK MAXV: %d\n",maxV);
    printf("ROBOT MINH: %d\n",RminH);
    printf("ROBOT MAXH: %d\n",RmaxH);
    printf("ROBOT MINS: %d\n",RminS);
    printf("ROBOT MAXS: %d\n",RmaxS);
    printf("ROBOT MINV: %d\n",RminV);
    printf("ROBOT MAXV: %d\n",RmaxV);
    printf("FPS: %d\n",fps);
    // input image file names
    char imgfile[] = "camera/photodir/capmallet1.png";
    char imgfile2[] = "camera/photodir/capmallet2.png";
    // load the images
    img = cvLoadImage(imgfile, CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH);
    img2 = cvLoadImage(imgfile2, CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH);
    // create windows for displaying the images
    cvNamedWindow("circle_sample", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("circle_sample2", CV_WINDOW_AUTOSIZE);
    //cvNamedWindow("cv_ColorExtraction");
    // Init font
    cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);
    IplImage* dst_img_mallett = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img_pack = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img2_mallett = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    // white extraction: 0,255,0,15,240,255
    // black extraction: 0, 255, 0, 50, 0, 100
    // blue detection:   0, 255, 50, 200, 100, 180
    //cv_ColorExtraction(img, dst_img_mallett, CV_BGR2HSV, 0, 255, 50, 200, 100, 180);
    cv_ColorExtraction(img, dst_img_pack, CV_BGR2HSV, 0, 255, 0, 50, 0, 100);
    cv_ColorExtraction(img2, dst_img2_mallett, CV_BGR2HSV, 0, 255, 50, 200, 100, 180);
    cv_ColorExtraction(img2, dst_img2_pack, CV_BGR2HSV, 0, 255, 0, 50, 0, 100);
    //CvMoments moment_mallett;
    CvMoments moment_pack;
    CvMoments moment2_mallett;
    CvMoments moment2_pack;
    //cvSetImageCOI(dst_img_mallett, 1);
    cvSetImageCOI(dst_img_pack, 1);
    cvSetImageCOI(dst_img2_mallett, 1);
    cvSetImageCOI(dst_img2_pack, 1);
    //cvMoments(dst_img_mallett, &moment_mallett, 0);
    cvMoments(dst_img_pack, &moment_pack, 0);
    cvMoments(dst_img2_mallett, &moment2_mallett, 0);
    cvMoments(dst_img2_pack, &moment2_pack, 0);
    // coordinate calculation
    double m00_before = cvGetSpatialMoment(&moment_pack, 0, 0);
    double m10_before = cvGetSpatialMoment(&moment_pack, 1, 0);
    double m01_before = cvGetSpatialMoment(&moment_pack, 0, 1);
    double m00_after = cvGetSpatialMoment(&moment2_pack, 0, 0);
    double m10_after = cvGetSpatialMoment(&moment2_pack, 1, 0);
    double m01_after = cvGetSpatialMoment(&moment2_pack, 0, 1);
    double gX_before = m10_before/m00_before;
    double gY_before = m01_before/m00_before;
    double gX_after = m10_after/m00_after;
    double gY_after = m01_after/m00_after;
    double m00_mallett = cvGetSpatialMoment(&moment2_mallett, 0, 0);
    double m10_mallett = cvGetSpatialMoment(&moment2_mallett, 1, 0);
    double m01_mallett = cvGetSpatialMoment(&moment2_mallett, 0, 1);
    double gX_now_mallett = m10_mallett/m00_mallett;
    double gY_now_mallett = m01_mallett/m00_mallett;
    cvCircle(img2, cvPoint((int)gX_before, (int)gY_before), 50, CV_RGB(0,0,255), 6, 8, 0);
    cvLine(img2, cvPoint((int)gX_before, (int)gY_before), cvPoint((int)gX_after, (int)gY_after), cvScalar(0,255,0), 2);
    int target_destanceY = 480 - 30; // the Y coordinate is held fixed: this is the defense line
    // the puck moves in a straight line, so a linear function predicts its subsequent trajectory
    double a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
    double b_intercept = gY_after - a_inclination * gX_after;
    printf("gX_after: %f\n",gX_after);
    printf("gY_after: %f\n",gY_after);
    printf("gX_before: %f\n",gX_before);
    printf("gY_before: %f\n",gY_before);
    printf("a_inclination: %f\n",a_inclination);
    printf("b_intercept: %f\n",b_intercept);
    int target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
    printf("target_coordinateX: %d\n",target_coordinateX);
    cvLine(img2, cvPoint((int)gX_after, (int)gY_after), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
    cvLine(img2, cvPoint(640, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
    cvLine(img2, cvPoint((int)gX_now_mallett, (int)gY_now_mallett), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
    cvPutText (img2, to_c_char((int)gX_now_mallett), cvPoint(460,30), &font, cvScalar(220,50,50));
    cvPutText (img2, to_c_char((int)target_coordinateX), cvPoint(560,30), &font, cvScalar(50,220,220));
    // display the images in the windows created above
    cvShowImage("circle_sample", img);
    cvShowImage("circle_sample2", img2);
    // PWM output test
    if(gpioInitialise() < 0) return 1;
    gpioSetMode(18, PI_OUTPUT);
    gpioPWM(18, 128);
    gpioSetPWMfrequency(18, 1000);
    /*if(wiringPiSetupGpio()==-1){
        printf("cannot setup gpio.\n");
        return 1;
    }
    pinMode(18,PWM_OUTPUT);
    pwmSetMode(PWM_MODE_MS);
    pwmSetClock(64);
    pwmSetRange(100);
    pwmWrite(18,50);*/
    while(1) {
        if(cv::waitKey(30) >= 0) {
            break;
        }
    }
    //Clean up used images
    cvReleaseImage(&img);
    // cvReleaseImage (&dst_img);
    cvDestroyAllWindows();
    return 0;
}
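/* One hazard in the slope computation above: when the puck moves parallel
 * to the Y axis, gX_after - gX_before is zero and a_inclination becomes
 * inf or NaN before the (int) cast. The robot-loop variant of this code
 * guards with if(a_inclination) after the fact; the division itself can
 * be guarded too. A sketch with hypothetical names: */
#include <math.h>

static int predict_cross_x(double x0, double y0, double x1, double y1,
                           double defense_y, int fallback_x)
{
    double dx = x1 - x0, dy = y1 - y0;
    if (fabs(dx) < 1e-6)          /* vertical path: X does not change */
        return (int)x1;
    double a = dy / dx;
    if (fabs(a) < 1e-6)           /* horizontal path never reaches the line */
        return fallback_x;
    double b = y1 - a * x1;
    return (int)((defense_y - b) / a);
}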
int main(){
    // Initialize capturing live feed from the camera
    CvCapture* capture = cvCaptureFromCAM(0);
    if(!capture) {
        printf("Could not initialize capturing...\n");
        return -1;
    }
    // The windows we'll be using
    cvNamedWindow("video");
    cvNamedWindow("thresh yellow");
    cvNamedWindow("thresh green");
    cvNamedWindow("thresh blue");
    cvNamedWindow("thresh red");
    IplImage* imgScribble = NULL;
    // Last known position per colour
    int lastXy = 0, lastYy = 0, lastXg = 0, lastYg = 0;
    int lastXb = 0, lastYb = 0, lastXr = 0, lastYr = 0;
    while(true)
    {
        IplImage* frame = cvQueryFrame(capture);
        // If we couldn't grab a frame... quit
        if(!frame)
            break;
        // If this is the first frame, we need to initialize the scribble image
        if (imgScribble == NULL) {
            imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
        }
        // Holds the color thresholded image (color = white, rest = black)
        IplImage* imgYellowThresh = GetThresholdedImage(frame, 4);
        IplImage* imgGreenThresh = GetThresholdedImage(frame, 1);
        IplImage* imgBlueThresh = GetThresholdedImage(frame, 2);
        IplImage* imgRedThresh = GetThresholdedImage(frame, 3);
        // Calculate the moments to estimate the position of each marker
        CvMoments *moments_yellow = (CvMoments*) malloc(sizeof(CvMoments));
        CvMoments *moments_green = (CvMoments*) malloc(sizeof(CvMoments));
        CvMoments *moments_blue = (CvMoments*) malloc(sizeof(CvMoments));
        CvMoments *moments_red = (CvMoments*) malloc(sizeof(CvMoments));
        cvMoments(imgYellowThresh, moments_yellow, 1);
        cvMoments(imgGreenThresh, moments_green, 1);
        cvMoments(imgBlueThresh, moments_blue, 1);
        cvMoments(imgRedThresh, moments_red, 1);
        // The actual moment values
        double moment10y = cvGetSpatialMoment(moments_yellow, 1, 0);
        double moment01y = cvGetSpatialMoment(moments_yellow, 0, 1);
        double areay = cvGetCentralMoment(moments_yellow, 0, 0);
        double moment10g = cvGetSpatialMoment(moments_green, 1, 0);
        double moment01g = cvGetSpatialMoment(moments_green, 0, 1);
        double areag = cvGetCentralMoment(moments_green, 0, 0);
        double moment10b = cvGetSpatialMoment(moments_blue, 1, 0);
        double moment01b = cvGetSpatialMoment(moments_blue, 0, 1);
        double areab = cvGetCentralMoment(moments_blue, 0, 0);
        double moment10r = cvGetSpatialMoment(moments_red, 1, 0);
        double moment01r = cvGetSpatialMoment(moments_red, 0, 1);
        double arear = cvGetCentralMoment(moments_red, 0, 0);
        //* Yellow processing *//
        // Holding the last and current color positions
        static int posXy = 0;
        static int posYy = 0;
        lastXy = posXy;
        lastYy = posYy;
        if (areay > 1000) {   // guard the division as well as the update
            int tempXy = moment10y / areay;
            int tempYy = moment01y / areay;
            if (tempXy >= 0 && tempYy >= 0 && tempXy < 700 && tempYy < 700) {
                posXy = tempXy;
                posYy = tempYy;
            }
        }
        // Print it out for debugging purposes
        std::cout << std::endl;
        printf("position yellow (%d,%d)\n", posXy, posYy);
        std::cout << "area yellow : " << areay << std::endl;
        //* Green Processing *//
        static int posXg = 0;
        static int posYg = 0;
        lastXg = posXg;
        lastYg = posYg;
        if (areag > 1000) {
            int tempXg = moment10g / areag;
            int tempYg = moment01g / areag;
            if (tempXg >= 0 && tempYg >= 0 && tempXg < 700 && tempYg < 700) {
                posXg = tempXg;
                posYg = tempYg;
            }
        }
        // Print it out for debugging purposes
        printf("position green (%d,%d)\n", posXg, posYg);
        std::cout << "area green : " << areag << std::endl;
        //* Blue Processing *//
        static int posXb = 0;
        static int posYb = 0;
        lastXb = posXb;
        lastYb = posYb;
        if (areab > 700) {
            int tempXb = moment10b / areab;
            int tempYb = moment01b / areab;
            if (tempXb >= 0 && tempYb >= 0 && tempXb < 700 && tempYb < 700) {
                posXb = tempXb;
                posYb = tempYb;
            }
        }
        // Print it out for debugging purposes
        printf("position blue (%d,%d)\n", posXb, posYb);
        std::cout << "area blue : " << areab << std::endl;
        //* Red Processing *//
        static int posXr = 0;
        static int posYr = 0;
        lastXr = posXr;
        lastYr = posYr;
        if (arear > 1000) {
            int tempXr = moment10r / arear;
            int tempYr = moment01r / arear;
            if (tempXr >= 0 && tempYr >= 0 && tempXr < 700 && tempYr < 700) {
                posXr = tempXr;
                posYr = tempYr;
            }
        }
        printf("position red (%d,%d)\n", posXr, posYr);
        std::cout << "area red : " << arear << std::endl;
        // We want to draw a line only if it's a valid position
        if (lastXy > 0 && lastYy > 0 && posXy > 0 && posYy > 0 && lastXy < 700 && lastYy < 700 && posXy < 700 && posYy < 700) {
            // Draw a line from the previous point to the current point
            cvLine(imgScribble, cvPoint(posXy, posYy), cvPoint(lastXy, lastYy), cvScalar(0, 255, 255), 5);
        }
        if (lastXg > 0 && lastYg > 0 && posXg > 0 && posYg > 0 && lastXg < 700 && lastYg < 700 && posXg < 700 && posYg < 700) {
            cvLine(imgScribble, cvPoint(posXg, posYg), cvPoint(lastXg, lastYg), cvScalar(0, 255, 0), 5);
        }
        if (lastXb > 0 && lastYb > 0 && posXb > 0 && posYb > 0 && lastXb < 700 && lastYb < 700 && posXb < 700 && posYb < 700) {
            cvLine(imgScribble, cvPoint(posXb, posYb), cvPoint(lastXb, lastYb), cvScalar(255, 0, 0), 5);
        }
        if (lastXr > 0 && lastYr > 0 && posXr > 0 && posYr > 0 && lastXr < 700 && lastYr < 700 && posXr < 700 && posYr < 700) {
            cvLine(imgScribble, cvPoint(posXr, posYr), cvPoint(lastXr, lastYr), cvScalar(0, 0, 255), 5);
        }
        // Add the scribbling image and the frame... and we get a combination of the 4
        cvAdd(frame, imgScribble, frame);
        cvShowImage("thresh yellow", imgYellowThresh);
        cvShowImage("thresh green", imgGreenThresh);
        cvShowImage("thresh blue", imgBlueThresh);
        cvShowImage("thresh red", imgRedThresh);
        cvShowImage("video", frame);
        // Release the thresholded images and the moments
        cvReleaseImage(&imgYellowThresh);
        cvReleaseImage(&imgGreenThresh);
        cvReleaseImage(&imgBlueThresh);
        cvReleaseImage(&imgRedThresh);
        free(moments_yellow);
        free(moments_green);
        free(moments_blue);
        free(moments_red);
        // Wait for a keypress; any key breaks out of the loop
        int c = cvWaitKey(10);
        if (c != -1)
            break;
    }
    cvReleaseCapture(&capture);
    return 0;
}
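/* The four per-colour blocks above repeat the same logic. A small struct
 * plus one update function keeps the behaviour identical while collapsing
 * the duplication; a sketch under the same assumptions as above (the
 * 0..700 window, per-colour area thresholds, and a thresholding callback
 * with the shape of GetThresholdedImage from this file): */
#include <opencv/cv.h>
#include <opencv/highgui.h>

typedef struct {
    int hue_id;          /* argument passed to the thresholding callback */
    double min_area;     /* ignore detections smaller than this          */
    CvScalar color;      /* trail colour drawn on the scribble image     */
    int x, y, last_x, last_y;
} ColorTrack;

static void track_color(ColorTrack *t, IplImage *frame, IplImage *scribble,
                        IplImage* (*threshold_fn)(IplImage*, int))
{
    IplImage *thresh = threshold_fn(frame, t->hue_id);
    CvMoments m;
    cvMoments(thresh, &m, 1);
    double area = cvGetSpatialMoment(&m, 0, 0);
    t->last_x = t->x;
    t->last_y = t->y;
    if (area > t->min_area) {
        int nx = (int)(cvGetSpatialMoment(&m, 1, 0) / area);
        int ny = (int)(cvGetSpatialMoment(&m, 0, 1) / area);
        if (nx >= 0 && ny >= 0 && nx < 700 && ny < 700) {
            t->x = nx;
            t->y = ny;
        }
    }
    if (t->x > 0 && t->y > 0 && t->last_x > 0 && t->last_y > 0)
        cvLine(scribble, cvPoint(t->x, t->y), cvPoint(t->last_x, t->last_y),
               t->color, 5, 8, 0);
    cvReleaseImage(&thresh);
}
/* e.g. ColorTrack yellow = {4, 1000.0, cvScalar(0,255,255), 0, 0, 0, 0};
 * then once per frame: track_color(&yellow, frame, imgScribble,
 * GetThresholdedImage);  */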
virtual void Process(IplImage* pImg, IplImage* pImgFG = NULL) { CvSeq* cnts; CvSeq* cnt; int i; //CvMat* pMC = NULL; if(m_BlobList.GetBlobNum() <= 0 ) return; /* Clear blob list for new blobs: */ m_BlobListNew.Clear(); assert(m_pMem); cvClearMemStorage(m_pMem); assert(pImgFG); { /* One contour - one blob: */ IplImage* pBin = cvCloneImage(pImgFG); assert(pBin); cvThreshold(pBin,pBin,128,255,CV_THRESH_BINARY); cvFindContours(pBin, m_pMem, &cnts, sizeof(CvContour), CV_RETR_EXTERNAL); /* Process each contour: */ for(cnt = cnts; cnt; cnt=cnt->h_next) { CvBlob NewBlob; /* Image moments: */ double M00,X,Y,XX,YY; CvMoments m; CvRect r = ((CvContour*)cnt)->rect; CvMat mat; if(r.height < 3 || r.width < 3) continue; cvMoments( cvGetSubRect(pImgFG,&mat,r), &m, 0 ); M00 = cvGetSpatialMoment( &m, 0, 0 ); if(M00 <= 0 ) continue; X = cvGetSpatialMoment( &m, 1, 0 )/M00; Y = cvGetSpatialMoment( &m, 0, 1 )/M00; XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X; YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y; NewBlob = cvBlob(r.x+(float)X,r.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY))); m_BlobListNew.AddBlob(&NewBlob); } /* Next contour. */ cvReleaseImage(&pBin); } for(i=m_BlobList.GetBlobNum(); i>0; --i) { /* Predict new blob position. */ CvBlob* pB = NULL; DefBlobTrackerCR* pBT = (DefBlobTrackerCR*)m_BlobList.GetBlob(i-1); /* Update predictor. */ pBT->pPredictor->Update(&(pBT->blob)); pB = pBT->pPredictor->Predict(); if(pB) { pBT->BlobPredict = pB[0]; } pBT->BlobPrev = pBT->blob; } /* Predict new blob position. */ if(m_BlobList.GetBlobNum()>0 && m_BlobListNew.GetBlobNum()>0) { /* Resolve new blob to old: */ int i,j; int NOld = m_BlobList.GetBlobNum(); int NNew = m_BlobListNew.GetBlobNum(); for(i=0; i<NOld; i++) { /* Set 0 collision and clear all hyp: */ DefBlobTrackerCR* pF = (DefBlobTrackerCR*)m_BlobList.GetBlob(i); pF->Collision = 0; pF->pBlobHyp->Clear(); } /* Set 0 collision. */ /* Create correspondence records: */ for(j=0; j<NNew; ++j) { CvBlob* pB1 = m_BlobListNew.GetBlob(j); DefBlobTrackerCR* pFLast = NULL; for(i=0; i<NOld; i++) { /* Check intersection: */ int Intersection = 0; DefBlobTrackerCR* pF = (DefBlobTrackerCR*)m_BlobList.GetBlob(i); CvBlob* pB2 = &(pF->BlobPredict); if( fabs(pB1->x-pB2->x)<0.5*(pB1->w+pB2->w) && fabs(pB1->y-pB2->y)<0.5*(pB1->h+pB2->h) ) Intersection = 1; if(Intersection) { if(pFLast) { pF->Collision = pFLast->Collision = 1; } pFLast = pF; pF->pBlobHyp->AddBlob(pB1); } } /* Check intersection. */ } /* Check next new blob. */ } /* Resolve new blob to old. */ for(i=m_BlobList.GetBlobNum(); i>0; --i) { /* Track each blob. */ CvBlob* pB = m_BlobList.GetBlob(i-1); DefBlobTrackerCR* pBT = (DefBlobTrackerCR*)pB; int BlobID = CV_BLOB_ID(pB); //CvBlob* pBBest = NULL; //double DistBest = -1; int j; if(pBT->pResolver) { pBT->pResolver->SetCollision(pBT->Collision); } if(pBT->Collision) { /* Tracking in collision: */ if(pBT->pResolver) { pB[0] = pBT->pResolver->Process(&(pBT->BlobPredict),pImg, pImgFG)[0]; } } /* Tracking in collision. 
*/ else { /* Non-collision tracking: */ CvBlob NewCC = pBT->BlobPredict; if(pBT->pBlobHyp->GetBlobNum()==1) { /* One blob to one CC: */ NewCC = pBT->pBlobHyp->GetBlob(0)[0]; } else { /* One blob several CC: */ CvBlob* pBBest = NULL; double DistBest = -1; double CMax = 0; for(j=pBT->pBlobHyp->GetBlobNum();j>0;--j) { /* Find best CC: */ CvBlob* pBNew = pBT->pBlobHyp->GetBlob(j-1); if(pBT->pResolver) { /* Choose CC by confidence: */ // double dx = fabs(CV_BLOB_X(pB)-CV_BLOB_X(pBNew)); // double dy = fabs(CV_BLOB_Y(pB)-CV_BLOB_Y(pBNew)); double C = pBT->pResolver->GetConfidence(pBNew,pImg, pImgFG); if(C > CMax || pBBest == NULL) { CMax = C; pBBest = pBNew; } } else { /* Choose CC by distance: */ double dx = fabs(CV_BLOB_X(pB)-CV_BLOB_X(pBNew)); double dy = fabs(CV_BLOB_Y(pB)-CV_BLOB_Y(pBNew)); double Dist = sqrt(dx*dx+dy*dy); if(Dist < DistBest || pBBest == NULL) { DistBest = Dist; pBBest = pBNew; } } } /* Find best CC. */ if(pBBest) NewCC = pBBest[0]; } /* One blob several CC. */ pB->x = NewCC.x; pB->y = NewCC.y; pB->w = (m_AlphaSize)*NewCC.w+(1-m_AlphaSize)*pB->w; pB->h = (m_AlphaSize)*NewCC.h+(1-m_AlphaSize)*pB->h; pBT->pResolver->SkipProcess(&(pBT->BlobPredict),pImg, pImgFG); } /* Non-collision tracking. */ pBT->pResolver->Update(pB, pImg, pImgFG); CV_BLOB_ID(pB)=BlobID; } /* Track next blob. */ if(m_Wnd) { IplImage* pI = cvCloneImage(pImg); int i; for(i=m_BlobListNew.GetBlobNum(); i>0; --i) { /* Draw each new CC: */ CvBlob* pB = m_BlobListNew.GetBlob(i-1); CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pB)); int x = cvRound(CV_BLOB_RX(pB)), y = cvRound(CV_BLOB_RY(pB)); CvSize s = cvSize(MAX(1,x), MAX(1,y)); //int c = 255; cvEllipse( pI, p, s, 0, 0, 360, CV_RGB(255,255,0), 1 ); } for(i=m_BlobList.GetBlobNum(); i>0; --i) { /* Draw each new CC: */ DefBlobTrackerCR* pF = (DefBlobTrackerCR*)m_BlobList.GetBlob(i-1); CvBlob* pB = &(pF->BlobPredict); CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pB)); int x = cvRound(CV_BLOB_RX(pB)), y = cvRound(CV_BLOB_RY(pB)); CvSize s = cvSize(MAX(1,x), MAX(1,y)); cvEllipse( pI, p, s, 0, 0, 360, CV_RGB(0,0,255), 1 ); pB = &(pF->blob); p = cvPointFrom32f(CV_BLOB_CENTER(pB)); x = cvRound(CV_BLOB_RX(pB)); y = cvRound(CV_BLOB_RY(pB)); s = cvSize(MAX(1,x), MAX(1,y)); cvEllipse( pI, p, s, 0, 0, 360, CV_RGB(0,255,0), 1 ); } //cvNamedWindow("CCwithCR",0); //cvShowImage("CCwithCR",pI); cvReleaseImage(&pI); } } /* Process. */
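/* Both Process() implementations in this file turn each external contour
 * into a blob the same way: centroid of the foreground patch plus a
 * 4-sigma box from its second-order moments. That conversion can be
 * isolated; a sketch, assuming the legacy cvaux blob-tracking headers
 * that define CvBlob and the cvBlob() constructor: */
#include <opencv/cv.h>
#include <opencv/cvaux.h>
#include <math.h>

static int blob_from_rect(IplImage *fg, CvRect r, CvBlob *out)
{
    CvMat patch;
    CvMoments m;
    if (r.width < 3 || r.height < 3) return 0;   /* same size gate as above */
    cvMoments(cvGetSubRect(fg, &patch, r), &m, 0);
    double M00 = cvGetSpatialMoment(&m, 0, 0);
    if (M00 <= 0) return 0;                      /* empty patch */
    double X  = cvGetSpatialMoment(&m, 1, 0) / M00;
    double Y  = cvGetSpatialMoment(&m, 0, 1) / M00;
    double XX = cvGetSpatialMoment(&m, 2, 0) / M00 - X * X;
    double YY = cvGetSpatialMoment(&m, 0, 2) / M00 - Y * Y;
    /* blob centre is rect origin + patch centroid; size is 4 sigma */
    *out = cvBlob((float)(r.x + X), (float)(r.y + Y),
                  (float)(4 * sqrt(XX)), (float)(4 * sqrt(YY)));
    return 1;
}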
void Calibrator::on_camaraButton_clicked()
{
    if(capturing)
    {
        while(true)
        {
            // Will hold a frame captured from the camera
            frame = 0;
            frame = cvQueryFrame(capture);
            // If we couldn't grab a frame... quit
            if(!frame)
                break;
            // If this is the first frame, we need to initialize it
            // if(imgScribble == NULL)
            // {
            //     imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
            // }
            //
            // Holds the green thresholded image (green = white, rest = black)
            IplImage* imgGreenThresh = GetThresholdedImage(frame);
            // Calculate the moments to estimate the position of the ball
            CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
            cvMoments(imgGreenThresh, moments, 1);
            // The actual moment values
            double moment10 = cvGetSpatialMoment(moments, 1, 0);
            double moment01 = cvGetSpatialMoment(moments, 0, 1);
            double area = cvGetCentralMoment(moments, 0, 0);
            // Holding the last and current ball positions
            static int posX = 0;
            static int posY = 0;
            // int lastX = posX;
            // int lastY = posY;
            posX = moment10/area;
            posY = moment01/area;
            // Print it out for debugging purposes
            // printf("position (%d,%d)\n", posX, posY);
            playerPos = QPoint(posX, posY);
            QString str = "( " + QString::number(posX) + " , " + QString::number(posY) + " ) ";
            ui->positionLabel->setText(str);
            // We want to draw a line only if it's a valid position
            // if(lastX>0 && lastY>0 && posX>0 && posY>0)
            // {
            //     // Draw a yellow line from the previous point to the current point
            //     cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
            // }
            // // Add the scribbling image and the frame...
            // cvAdd(frame, imgScribble, frame);
            cvShowImage("threshold", imgGreenThresh);
            cvShowImage("video", frame);
            // Release the thresholded image and the moments before any early
            // exit... we need no memory leaks.. please
            cvReleaseImage(&imgGreenThresh);
            free(moments);
            // Wait for a keypress
            int c = cvWaitKey(10);
            if(c!=-1)
            {
                // If pressed, break out of the loop
                cvDestroyWindow("threshold");
                cvDestroyWindow("video");
                break;
            }
        }
        // We're done using the camera. Other applications can now use it
        //cvReleaseCapture(&capture);
        //cvReleaseWindow?();
    }
}
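/* As in the other capture loops, posX = moment10/area above divides
 * without checking that anything was segmented; with an empty threshold
 * image, area is 0 and the cast of the result to int is undefined.
 * A guarded variant of that step for this Qt context, sketched as a
 * hypothetical helper (the area cutoff of 1.0 is an assumption): */
#include <opencv/cv.h>
#include <QString>
#include <QPoint>

static QString positionText(IplImage *thresh, QPoint *out)
{
    CvMoments m;
    cvMoments(thresh, &m, 1);
    double area = cvGetSpatialMoment(&m, 0, 0);
    if (area <= 1.0)
        return QString();   /* empty string: nothing detected */
    int x = (int)(cvGetSpatialMoment(&m, 1, 0) / area);
    int y = (int)(cvGetSpatialMoment(&m, 0, 1) / area);
    *out = QPoint(x, y);
    return QString("( %1 , %2 )").arg(x).arg(y);
}
/* In the loop above one could then write:
 *   QString str = positionText(imgGreenThresh, &playerPos);
 *   if (!str.isEmpty()) ui->positionLabel->setText(str);  */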
int main(int argc, char* argv[])
{
    int counter;
    wchar_t auxstr[20];
    printf("start!\n");
    printf("PUCK MINH: %d\n",minH);
    printf("PUCK MAXH: %d\n",maxH);
    printf("PUCK MINS: %d\n",minS);
    printf("PUCK MAXS: %d\n",maxS);
    printf("PUCK MINV: %d\n",minV);
    printf("PUCK MAXV: %d\n",maxV);
    printf("ROBOT MINH: %d\n",RminH);
    printf("ROBOT MAXH: %d\n",RmaxH);
    printf("ROBOT MINS: %d\n",RminS);
    printf("ROBOT MAXS: %d\n",RmaxS);
    printf("ROBOT MINV: %d\n",RminV);
    printf("ROBOT MAXV: %d\n",RmaxV);
    printf("FPS: %d\n",fps);
    // PWM initialization
    if(gpioInitialise() < 0) return 1;
    // input image file names
    char imgfile[] = "camera/photodir/capmallet1.png";
    char imgfile2[] = "camera/photodir/capmallet2.png";
    // load the images
    img = cvLoadImage(imgfile, CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH);
    img2 = cvLoadImage(imgfile2, CV_LOAD_IMAGE_ANYCOLOR | CV_LOAD_IMAGE_ANYDEPTH);
    // create windows for displaying the images
    cvNamedWindow("circle_sample", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("circle_sample2", CV_WINDOW_AUTOSIZE);
    //cvNamedWindow("cv_ColorExtraction");
    // Init font
    cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);
    IplImage* dst_img_mallett = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img_pack = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img2_mallett = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    // white extraction: 0,255,0,15,240,255
    // black extraction: 0, 255, 0, 50, 0, 100
    // blue detection:   0, 255, 50, 200, 100, 180
    //cv_ColorExtraction(img, dst_img_mallett, CV_BGR2HSV, 0, 255, 50, 200, 100, 180);
    cv_ColorExtraction(img, dst_img_pack, CV_BGR2HSV, 0, 255, 0, 50, 0, 100);
    cv_ColorExtraction(img2, dst_img2_mallett, CV_BGR2HSV, 0, 255, 50, 200, 100, 180);
    cv_ColorExtraction(img2, dst_img2_pack, CV_BGR2HSV, 0, 255, 0, 50, 0, 100);
    //CvMoments moment_mallett;
    CvMoments moment_pack;
    CvMoments moment2_mallett;
    CvMoments moment2_pack;
    //cvSetImageCOI(dst_img_mallett, 1);
    cvSetImageCOI(dst_img_pack, 1);
    cvSetImageCOI(dst_img2_mallett, 1);
    cvSetImageCOI(dst_img2_pack, 1);
    //cvMoments(dst_img_mallett, &moment_mallett, 0);
    cvMoments(dst_img_pack, &moment_pack, 0);
    cvMoments(dst_img2_mallett, &moment2_mallett, 0);
    cvMoments(dst_img2_pack, &moment2_pack, 0);
    // coordinate calculation
    double m00_before = cvGetSpatialMoment(&moment_pack, 0, 0);
    double m10_before = cvGetSpatialMoment(&moment_pack, 1, 0);
    double m01_before = cvGetSpatialMoment(&moment_pack, 0, 1);
    double m00_after = cvGetSpatialMoment(&moment2_pack, 0, 0);
    double m10_after = cvGetSpatialMoment(&moment2_pack, 1, 0);
    double m01_after = cvGetSpatialMoment(&moment2_pack, 0, 1);
    double gX_before = m10_before/m00_before;
    double gY_before = m01_before/m00_before;
    double gX_after = m10_after/m00_after;
    double gY_after = m01_after/m00_after;
    double m00_mallett = cvGetSpatialMoment(&moment2_mallett, 0, 0);
    double m10_mallett = cvGetSpatialMoment(&moment2_mallett, 1, 0);
    double m01_mallett = cvGetSpatialMoment(&moment2_mallett, 0, 1);
    double gX_now_mallett = m10_mallett/m00_mallett;
    double gY_now_mallett = m01_mallett/m00_mallett;
    cvCircle(img2, cvPoint((int)gX_before, (int)gY_before), 50, CV_RGB(0,0,255), 6, 8, 0);
    cvLine(img2, cvPoint((int)gX_before, (int)gY_before), cvPoint((int)gX_after, (int)gY_after), cvScalar(0,255,0), 2);
    int target_destanceY = 480 - 30; // the Y coordinate is held fixed: this is the defense line
    // the puck moves in a straight line, so a linear function predicts its subsequent trajectory
    double a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
    double b_intercept = gY_after - a_inclination * gX_after;
    printf("gX_after: %f\n",gX_after);
    printf("gY_after: %f\n",gY_after);
    printf("gX_before: %f\n",gX_before);
    printf("gY_before: %f\n",gY_before);
    printf("a_inclination: %f\n",a_inclination);
    printf("b_intercept: %f\n",b_intercept);
    int target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
    printf("target_coordinateX: %d\n",target_coordinateX);
    cvLine(img2, cvPoint((int)gX_after, (int)gY_after), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
    cvLine(img2, cvPoint(640, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
    cvLine(img2, cvPoint((int)gX_now_mallett, (int)gY_now_mallett), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
    cvPutText (img2, to_c_char((int)gX_now_mallett), cvPoint(460,30), &font, cvScalar(220,50,50));
    cvPutText (img2, to_c_char((int)target_coordinateX), cvPoint(560,30), &font, cvScalar(50,220,220));
    int amount_movement = gX_now_mallett - target_coordinateX;
    // compute how far the mallet can move in one comparison of two images
    int max_amount_movement = CAM_PIX_WIDTH * 0.54 / 1; // CAM_PIX_WIDTH: 640, time per comparison: 0.27*2 s, time to reach the table edge: 1 s
    int target_direction = 0; // default when no movement is required
    if(amount_movement > 0){
        if(max_amount_movement < amount_movement){
            amount_movement = max_amount_movement;
        }
        target_direction = 0; // clockwise
    }
    else if(amount_movement < 0){
        amount_movement = -amount_movement; // make it positive
        if(max_amount_movement < amount_movement){
            amount_movement = max_amount_movement;
        }
        target_direction = 1; // counterclockwise
    }
    // PWM output
    double set_time_millis = 270.0 * amount_movement / max_amount_movement; // 270 ms * (0..1)
    gpioSetMode(18, PI_OUTPUT);
    gpioSetMode(19, PI_OUTPUT);
    gpioPWM(18, 128);
    gpioWrite(19, target_direction);
    int closest_frequency = gpioSetPWMfrequency(18, 2000);
    printf("setting_frequency: %d\n", closest_frequency);
    gpioSetTimerFunc(0, (int)set_time_millis, pwmReset);
    // display the images in the windows created above
    cvShowImage("circle_sample", img);
    cvShowImage("circle_sample2", img2);
    while(1) {
        if(cv::waitKey(30) >= 0) {
            break;
        }
    }
    gpioTerminate();
    //Clean up used images
    cvReleaseImage(&img);
    // cvReleaseImage (&dst_img);
    cvDestroyAllWindows();
    return 0;
}
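/* The duration math above maps the required mallet travel onto a fraction
 * of the 270 ms comparison budget. Worked through as a standalone function
 * (hypothetical names; assumes the same 640-px frame width, the 0.54
 * reachable-fraction factor, and the direction encoding used above,
 * where positive displacement means clockwise): */
#include <stdio.h>

#define CAM_PIX_WIDTH 640

static double pwm_millis(int movement_px, int *direction)
{
    int max_px = (int)(CAM_PIX_WIDTH * 0.54);   /* pixels reachable in 270 ms */
    int mag = movement_px >= 0 ? movement_px : -movement_px;
    *direction = movement_px >= 0 ? 0 : 1;      /* 0 = clockwise, 1 = counterclockwise */
    if (mag > max_px) mag = max_px;             /* clamp to one period */
    return 270.0 * mag / max_px;                /* on-time in milliseconds */
}

int main(void)
{
    int dir;
    double ms = pwm_millis(-120, &dir);
    printf("direction=%d, on-time=%.1f ms\n", dir, ms);
    return 0;
}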
virtual void Process(IplImage* pImg, IplImage* pImgFG = NULL) { CvSeq* cnts; CvSeq* cnt; int i; m_pImg = pImg; m_pImgFG = pImgFG; if(m_BlobList.GetBlobNum() <= 0 ) return; /* Clear bloblist for new blobs: */ m_BlobListNew.Clear(); assert(m_pMem); cvClearMemStorage(m_pMem); assert(pImgFG); /* Find CC: */ #if 0 { // By contour clustering: cvFindBlobsByCCClasters(pImgFG, &m_BlobListNew, m_pMem); } #else { /* One contour - one blob: */ IplImage* pBin = cvCloneImage(pImgFG); assert(pBin); cvThreshold(pBin,pBin,128,255,CV_THRESH_BINARY); cvFindContours(pBin, m_pMem, &cnts, sizeof(CvContour), CV_RETR_EXTERNAL); /* Process each contour: */ for(cnt = cnts; cnt; cnt=cnt->h_next) { CvBlob NewBlob; /* Image moments: */ double M00,X,Y,XX,YY; CvMoments m; CvRect r = ((CvContour*)cnt)->rect; CvMat mat; if(r.height < 3 || r.width < 3) continue; cvMoments( cvGetSubRect(pImgFG,&mat,r), &m, 0 ); M00 = cvGetSpatialMoment( &m, 0, 0 ); if(M00 <= 0 ) continue; X = cvGetSpatialMoment( &m, 1, 0 )/M00; Y = cvGetSpatialMoment( &m, 0, 1 )/M00; XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X; YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y; NewBlob = cvBlob(r.x+(float)X,r.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY))); m_BlobListNew.AddBlob(&NewBlob); } /* Next contour. */ cvReleaseImage(&pBin); } #endif for(i=m_BlobList.GetBlobNum(); i>0; --i) { /* Predict new blob position: */ CvBlob* pB=NULL; DefBlobTracker* pBT = (DefBlobTracker*)m_BlobList.GetBlob(i-1); /* Update predictor by previous value of blob: */ pBT->pPredictor->Update(&(pBT->blob)); /* Predict current position: */ pB = pBT->pPredictor->Predict(); if(pB) { pBT->BlobPredict = pB[0]; } else { pBT->BlobPredict = pBT->blob; } } /* Predict new blob position. */ if(m_Collision) for(i=m_BlobList.GetBlobNum(); i>0; --i) { /* Predict collision. */ int Collision = 0; int j; DefBlobTracker* pF = (DefBlobTracker*)m_BlobList.GetBlob(i-1); for(j=m_BlobList.GetBlobNum(); j>0; --j) { /* Predict collision: */ CvBlob* pB1; CvBlob* pB2; DefBlobTracker* pF2 = (DefBlobTracker*)m_BlobList.GetBlob(j-1); if(i==j) continue; pB1 = &pF->BlobPredict; pB2 = &pF2->BlobPredict; if( fabs(pB1->x-pB2->x)<0.6*(pB1->w+pB2->w) && fabs(pB1->y-pB2->y)<0.6*(pB1->h+pB2->h) ) Collision = 1; pB1 = &pF->blob; pB2 = &pF2->blob; if( fabs(pB1->x-pB2->x)<0.6*(pB1->w+pB2->w) && fabs(pB1->y-pB2->y)<0.6*(pB1->h+pB2->h) ) Collision = 1; if(Collision) break; } /* Check next blob to cross current. */ pF->Collision = Collision; } /* Predict collision. */ for(i=m_BlobList.GetBlobNum(); i>0; --i) { /* Find a neighbour on current frame * for each blob from previous frame: */ CvBlob* pBl = m_BlobList.GetBlob(i-1); DefBlobTracker* pBT = (DefBlobTracker*)pBl; //int BlobID = CV_BLOB_ID(pB); //CvBlob* pBBest = NULL; //double DistBest = -1; //int j; if(pBT->pBlobHyp->GetBlobNum()>0) { /* Track all hypotheses: */ int h,hN = pBT->pBlobHyp->GetBlobNum(); for(h=0; h<hN; ++h) { int j, jN = m_BlobListNew.GetBlobNum(); CvBlob* pB = pBT->pBlobHyp->GetBlob(h); int BlobID = CV_BLOB_ID(pB); CvBlob* pBBest = NULL; double DistBest = -1; for(j=0; j<jN; j++) { /* Find best CC: */ double Dist = -1; CvBlob* pBNew = m_BlobListNew.GetBlob(j); double dx = fabs(CV_BLOB_X(pB)-CV_BLOB_X(pBNew)); double dy = fabs(CV_BLOB_Y(pB)-CV_BLOB_Y(pBNew)); if(dx > 2*CV_BLOB_WX(pB) || dy > 2*CV_BLOB_WY(pB)) continue; Dist = sqrt(dx*dx+dy*dy); if(Dist < DistBest || pBBest == NULL) { DistBest = Dist; pBBest = pBNew; } } /* Find best CC. */ if(pBBest) { pB[0] = pBBest[0]; CV_BLOB_ID(pB) = BlobID; } else { /* Delete this hypothesis. 
*/ pBT->pBlobHyp->DelBlob(h); h--; hN--; } } /* Next hypothesis. */ } /* Track all hypotheses. */ } /* Track next blob. */ m_ClearHyp = 1; } /* Process. */
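/* The hypothesis-tracking loop above keeps, for each hypothesis, the
 * nearest new connected component whose centre lies within a gate of
 * twice the blob's size on both axes. The matching rule in isolation,
 * sketched with a plain struct instead of the CvBlob accessors: */
#include <math.h>

typedef struct { float x, y, w, h; } Box;

/* Return the index of the nearest gated candidate, or -1 if none. */
static int nearest_gated(const Box *ref, const Box *cand, int n)
{
    int best = -1;
    double best_d = -1.0;
    for (int j = 0; j < n; ++j) {
        double dx = fabs(ref->x - cand[j].x);
        double dy = fabs(ref->y - cand[j].y);
        if (dx > 2 * ref->w || dy > 2 * ref->h)
            continue;                        /* outside the gate */
        double d = sqrt(dx * dx + dy * dy);
        if (best < 0 || d < best_d) {
            best_d = d;
            best = j;
        }
    }
    return best;
}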