// Polls the HighGUI event queue once and reacts to single-key commands:
// 's' saves the current float framebuffer to `savePath` as a PFM file,
// 'q' requests shutdown by returning "quit". Any other key (or no key)
// returns an empty string.
string MCRenderer::response(const IplImage* currentImage)
{
    const int pressed = cvWaitKey(10);

    if (pressed == 'q')
        return "quit";

    if (pressed == 's' && currentImage)
    {
        // Widen to a 3-channel 32-bit float image before writing the PFM.
        IplImage* floatCopy = cvCreateImage(
            cvSize(currentImage->width, currentImage->height), IPL_DEPTH_32F, 3);
        cvConvertScale(currentImage, floatCopy, 1);
        saveImagePFM(savePath, floatCopy);
        cvReleaseImage(&floatCopy);
    }

    return "";
}
/*
 * Smooths and in-fills the global depth map (dImage) using the user mask
 * (uImage): user pixels keep their measured depth, non-user pixels are
 * filled with the mean user depth, and the combined map is Gaussian-smoothed
 * at 8-bit precision before being scaled back up. Always returns true.
 * NOTE(review): relies on file-level globals uImage, dImage, dSize — assumes
 * uImage is an 8-bit binary mask of user pixels; confirm with callers.
 */
bool optimizeDepthMap() {
    cvErode(uImage,uImage,0,2); // Smoothen the user map as well
    cvDilate(uImage,uImage,0,2);
    CvScalar depthMean=cvAvg(dImage,uImage); // Get the average depth value of the user pixels
    cvNot(uImage,uImage); // Invert the user pixels to paint the rest of the image with average user depth
    //viewImage(dImage);
    cvSet(dImage,depthMean,uImage);
    // Drop to 8 bits so cvSmooth can operate, smooth, then scale back to the
    // original 16-bit-style range (x256).
    IplImage* tempImage=cvCreateImage(dSize,IPL_DEPTH_8U,1);
    cvConvertScale(dImage,tempImage,1.0/256);
    cvSmooth(tempImage,tempImage,CV_GAUSSIAN,7);//Perform Gaussian smoothing, depth map is optimized.
    cvConvert(tempImage,dImage);
    cvScale(dImage,dImage,256);
    // uImage is still inverted here, so this zeroes the non-user region.
    cvSet(dImage,cvScalar(0),uImage);
    //viewImage(dImage);
    //cvSmooth(dImage,dImage,CV_GAUSSIAN,gaussian_m,gaussian_n,gaussian_e);//Perform Gaussian Smoothing, depth map is optimized.
    cvNot(uImage,uImage); // Restore the mask to its original polarity
    cvReleaseImage(&tempImage);
    return true;
}
/*
 * Converts a 3-channel 8-bit BGR image or an 8-bit grayscale image into a
 * normalized 32-bit float grayscale image with values in [0, 1].
 * Returns NULL when the input is NULL; the caller owns the returned image.
 */
IplImage *get_gray(const IplImage *img)
{
    if (img == NULL)
        return NULL;

    IplImage *result = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1);

    /* First obtain an 8-bit single-channel intermediate. */
    IplImage *tmp8;
    if (img->nChannels == 1) {
        tmp8 = (IplImage *)cvClone(img);
    } else {
        tmp8 = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
        cvCvtColor(img, tmp8, CV_BGR2GRAY);
    }

    /* Widen to float while normalizing into [0, 1]. */
    cvConvertScale(tmp8, result, 1.0 / 255.0, 0);
    cvReleaseImage(&tmp8);
    return result;
}
/* Create a camshift tracked object from a region in image.
 * Allocates the TrackedObj and its working images, builds a normalized hue
 * histogram over `region`, and records the region as the previous location.
 * Returns NULL if the struct allocation fails (the original code kept going
 * and dereferenced the NULL pointer). Caller owns the returned object. */
TrackedObj* FaceBl0r::create_tracked_object (IplImage* image, CvRect* region) {
  TrackedObj* obj;

  //allocate memory for tracked object struct; bail out on failure instead of
  //dereferencing NULL below
  if((obj = (TrackedObj *) malloc(sizeof *obj)) == NULL)
    return NULL;

  //create-image: size(w,h), bit depth, channels
  obj->hsv  = cvCreateImage(cvGetSize(image), 8, 3);
  obj->mask = cvCreateImage(cvGetSize(image), 8, 1);
  obj->hue  = cvCreateImage(cvGetSize(image), 8, 1);
  obj->prob = cvCreateImage(cvGetSize(image), 8, 1);

  int hist_bins = 30;           //number of histogram bins
  float hist_range[] = {0,180}; //histogram range
  float* range = hist_range;
  obj->hist = cvCreateHist(1,             //number of hist dimensions
                           &hist_bins,    //array of dimension sizes
                           CV_HIST_ARRAY, //representation format
                           &range,        //array of ranges for bins
                           1);            //uniformity flag

  //create a new hue image
  update_hue_image(image, obj);

  float max_val = 0.f;

  //create a histogram representation for the face, normalized to [0,255]
  cvSetImageROI(obj->hue, *region);
  cvSetImageROI(obj->mask, *region);
  cvCalcHist(&obj->hue, obj->hist, 0, obj->mask);
  cvGetMinMaxHistValue(obj->hist, 0, &max_val, 0, 0 );
  cvConvertScale(obj->hist->bins, obj->hist->bins,
                 max_val ? 255.0/max_val : 0, 0);
  cvResetImageROI(obj->hue);
  cvResetImageROI(obj->mask);

  //store the previous face location
  obj->prev_rect = *region;

  return obj;
}
/**
 * @brief Converts an RGB (BGR) image to HSV on an OpenCL device.
 * @param bgr The original BGR image (8-bit).
 * @param objFRAME The OpenCL buffer associated with the frame, used by the kernel.
 * @param context The OpenCL device context (unused here; kept for the caller's convenience).
 * @param kernelHSV The OpenCL kernel to run for the HSV conversion.
 * @param command_queue The OpenCL device command queue.
 * @param work_items The number of work-groups to use for the computation.
 * @return A new 32-bit image with S and V in [0,1] and H in [0,360].
 *
 * NOTE(review): every `ret` status code is assigned but never checked — an
 * enqueue/arg-set failure is silent. The write is non-blocking (CL_FALSE);
 * an in-order queue serializes it before the kernel, but the host buffer
 * must stay alive, which it does since bgr32f is returned. No read-back of
 * the kernel result into bgr32f happens here — presumably the caller reads
 * objFRAME back; confirm against the call site.
 */
IplImage* bgr2hsv( IplImage *bgr, cl_mem *objFRAME, cl_context *context, cl_kernel *kernelHSV, cl_command_queue *command_queue, size_t work_items ) {
    cl_int ret;
    IplImage *bgr32f;

    // Normalize the 8-bit input into a 3-channel float image in [0,1].
    bgr32f = cvCreateImage( cvGetSize(bgr), IPL_DEPTH_32F, 3 );
    cvConvertScale( bgr, bgr32f, 1.0 / 255.0, 0 );

    // Upload the float pixels to the device (non-blocking).
    ret = clEnqueueWriteBuffer(command_queue[0], objFRAME[0], CL_FALSE, 0, bgr32f->imageSize, bgr32f->imageData, 0, NULL, NULL);

    // Set the kernel arguments
    ret = clSetKernelArg(kernelHSV[0], 0, sizeof(cl_mem), (void *)objFRAME);
    ret = clSetKernelArg(kernelHSV[0], 1, sizeof(int), &bgr32f->widthStep);
    ret = clSetKernelArg(kernelHSV[0], 2, sizeof(int), &bgr32f->height);
    ret = clSetKernelArg(kernelHSV[0], 3, sizeof(int), &bgr32f->width);

    size_t local = 128;
    size_t global = work_items * local;

    // Run the kernel as a 1-D data-parallel dispatch
    ret = clEnqueueNDRangeKernel(command_queue[0], kernelHSV[0], 1, NULL, &global, &local, 0, NULL, NULL);

    return bgr32f;
}
/**
 * Create Mask from comp buf node.
 * Wraps the CompBuf's float pixel data in a temporary IplImage header and
 * converts it to a freshly allocated 8-bit unsigned image.
 * @param cbuf source compositor buffer (may be NULL)
 * @return newly allocated IplImage mask owned by the caller, or NULL when
 *         cbuf is NULL or has a non-positive dimension.
 */
IplImage* BOCV_Mask_attach(CompBuf* cbuf)
{
	IplImage *mask;
	IplImage *img;

	if(cbuf == NULL) return NULL;
	if(cbuf->x>0 && cbuf->y>0 ){
		//Create image header over the comp buf's own pixel data (no copy)
		img = cvCreateImageHeader(cvSize(cbuf->x,cbuf->y),IPL_DEPTH_32F,cbuf->type);
		cvSetData(img,cbuf->rect,cbuf->x * cbuf->type * sizeof(float)); // always 4 byte align.
		mask= cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, cbuf->type);
		//Convert to 8 bit unsigned
		cvConvertScale(img, mask,1,0);
		//Release only the header: the pixel data belongs to cbuf. The
		//original leaked this header on every call.
		cvReleaseImageHeader(&img);
		return mask;
	}else{
		return NULL;
	}
}
static void gst_motiondetect_log_image (const IplImage * image, const char * debugDirectory, int index, const char * filename) { if (image && debugDirectory) { char *filepath; asprintf (&filepath, "%s/%05d_%s", debugDirectory, index, filename); if (image->depth == IPL_DEPTH_32F) { IplImage *scaledImageToLog = cvCreateImage ( cvSize (image->width, image->height), IPL_DEPTH_8U, 1); cvConvertScale (image, scaledImageToLog, 255.0, 0); cvSaveImage (filepath, scaledImageToLog, NULL); cvReleaseImage (&scaledImageToLog); } else { cvSaveImage (filepath, image, NULL); } free (filepath); } }
/*
 * Builds the hue histogram used for CamShift tracking from the selected
 * rectangle of `img`. `sel` uses a top-origin CRect; the y computation and
 * cvFlip convert to the bottom-origin frame used internally.
 * Side effects: updates members selection, image, hsv, mask, hue, hist and
 * track_window.
 */
void CamShift::CalcHistogram(const ImgBgr& img, const CRect& sel)
{
  // Convert the selection into the internal (bottom-origin) coordinates.
  selection.x = sel.left;
  selection.y = img.Height()-sel.bottom-1;
  selection.width = sel.Width();
  selection.height = sel.Height();

  cvCopy(ImgIplImage(img), image, 0 );
  cvCvtColor( image, hsv, CV_BGR2HSV );
  cvFlip(hsv,hsv,0);
  //cvSaveImage("hsv.bmp", hsv);
  //cvSaveImage("img.bmp", image);

  // Mask out pixels whose saturation/value fall outside the usable range.
  int _vmin = vmin, _vmax = vmax;
  cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
              cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
  cvSplit( hsv, hue, 0, 0, 0 );

  // Histogram the hue channel inside the selection, normalized to [0,255].
  float max_val = 0.f;
  cvSetImageROI(hue, selection );
  cvSetImageROI( mask, selection );
  cvCalcHist( &hue, hist, 0, mask );
  cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
  cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
  cvResetImageROI( hue );
  cvResetImageROI( mask );

  track_window = selection;

  // (disabled) histogram visualization
  //  cvZero( histimg );
  //  int bin_w = histimg->width / hdims;
  //  for(int i = 0; i < hdims; i++ )
  //  {
  //    int a = cvGetReal1D(hist->bins,i);
  //    int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
  //    CvScalar color = hsv2rgb(i*180.f/hdims);
  //    cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
  //                 cvPoint((i+1)*bin_w,histimg->height - val),
  //                 color, -1, 8, 0 );
  //  }
  //  cvNamedWindow( "Histogram", 1 );
  //  // cvShowImage( "Histogram", histimg );
}
////////////////////////////////// // startTracking() // void startTracking(IplImage * pImg, CvRect * pFaceRect) { float maxVal = 0.f; // Make sure internal data structures have been allocated if( !pHist ) createTracker(pImg); // Create a new hue image updateHueImage(pImg); // Create a histogram representation for the face cvSetImageROI( pHueImg, *pFaceRect ); cvSetImageROI( pMask, *pFaceRect ); cvCalcHist( &pHueImg, pHist, 0, pMask ); cvGetMinMaxHistValue( pHist, 0, &maxVal, 0, 0 ); cvConvertScale( pHist->bins, pHist->bins, maxVal? 255.0/maxVal : 0, 0 ); cvResetImageROI( pHueImg ); cvResetImageROI( pMask ); // Store the previous face location prevFaceRect = *pFaceRect; }
/*
 * L1-minimization with quadratic constraint (basis pursuit style) via a
 * log-barrier method. Solves for X given measurement matrix A and
 * observations B; `epsilon` is the constraint radius, `mu` the barrier
 * growth factor. Returns 0 on success, -1 if the initial CG solve for the
 * least-squares starting point fails.
 * NOTE(review): icvL1QCNewton is the inner Newton solver defined elsewhere;
 * assumes lb_term_crit.epsilon > 0 when max_iter is derived from it.
 */
int cvL1QCSolve( CvMat* A, CvMat* B, CvMat* X, double epsilon, double mu, CvTermCriteria lb_term_crit, CvTermCriteria cg_term_crit )
{
	// Starting point: X = A' * (AA')^-1 * B (minimum-energy solution).
	CvMat* AAt = cvCreateMat( A->rows, A->rows, CV_MAT_TYPE(A->type) );
	cvGEMM( A, A, 1, NULL, 0, AAt, CV_GEMM_B_T );
	CvMat* W = cvCreateMat( A->rows, 1, CV_MAT_TYPE(X->type) );
	if ( cvCGSolve( AAt, B, W, cg_term_crit ) > .5 )
	{
		// Conjugate-gradient residual too large — no usable starting point.
		cvReleaseMat( &W );
		cvReleaseMat( &AAt );
		return -1;
	}
	cvGEMM( A, W, 1, NULL, 0, X, CV_GEMM_A_T );
	cvReleaseMat( &W );
	cvReleaseMat( &AAt );

	// U: elementwise bound on |X|, padded so the barrier starts strictly
	// feasible (0.95*|x| + 0.1*max|x|).
	CvMat* U = cvCreateMat( X->rows, X->cols, CV_MAT_TYPE(X->type) );
	cvAbsDiffS( X, U, cvScalar(0) );
	CvScalar sumAbsX = cvSum( U );
	double minAbsX, maxAbsX;
	cvMinMaxLoc( U, &minAbsX, &maxAbsX );
	cvConvertScale( U, U, .95, maxAbsX * .1 );

	// Initial barrier weight and, if the caller gave no iteration cap, the
	// number of outer iterations needed to reach the target duality gap.
	double tau = MAX( (2 * X->rows + 1) / sumAbsX.val[0], 1 );
	if ( !(lb_term_crit.type & CV_TERMCRIT_ITER) )
		lb_term_crit.max_iter = ceil( (log(2 * X->rows + 1) - log(lb_term_crit.epsilon) - log(tau)) / log(mu) );
	CvTermCriteria nt_term_crit = cvTermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 50, lb_term_crit.epsilon );

	// Outer log-barrier loop: Newton-solve, then tighten the barrier.
	for ( int i = 0; i < lb_term_crit.max_iter; ++i )
	{
		icvL1QCNewton( A, B, X, U, epsilon, tau, nt_term_crit, cg_term_crit );
		tau *= mu;
	}

	cvReleaseMat( &U );
	return 0;
}
/*
 * One CamShift tracking step over the current frame's HSV data.
 * On the first call (isTrackingInitialized == false) builds the hue
 * histogram from `selection`; afterwards back-projects the histogram and
 * runs cvCamShift to update trackWindow every frame.
 * Side effects: mutates members hsv, hue, mask, hist, backproject,
 * trackWindow, trackComp, isTrackingInitialized.
 */
void BoatDetecting::startTrackObject(){
	// Keep only pixels whose S/V are in the usable range. // 10,256,30
	cvInRangeS(hsv, cvScalar(0, smin, MIN(vmin, vmax), 0), cvScalar(180, 256, MAX(vmin, vmax), 0), mask);
	cvSplit(hsv, hue, 0, 0, 0);

	if (!isTrackingInitialized){  // if the track window has not been initialized yet
		// Build the hue histogram over the selection, normalized to [0,255].
		float max_val = 0.f;
		cvSetImageROI(hue, selection);
		cvSetImageROI(mask, selection);
		cvCalcHist(&hue, hist, 0, mask);
		cvGetMinMaxHistValue(hist, 0, &max_val, 0, 0);
		cvConvertScale(hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0);
		cvResetImageROI(hue);
		cvResetImageROI(mask);
		trackWindow = selection;
		isTrackingInitialized = true;
	}

	// Probability map of the tracked hue, masked to valid pixels.
	cvCalcBackProject(&hue, backproject, hist);
	//cvShowImage("Hue Channel",backproject);
	cvAnd(backproject, mask, backproject, 0);

	// Only run CamShift while the window lies fully inside the frame.
	//if (trackWindow.x + trackWindow.width/2< allfWidth &&trackWindow.y + trackWindow.height/2< allfHeight &&trackWindow.x>0)
	if (trackWindow.x + trackWindow.width< allfWidth &&trackWindow.y + trackWindow.height< allfHeight &&trackWindow.x>0)
		// After initialization trackWindow itself seeds the search and is
		// updated on every frame.
		cvCamShift(backproject, trackWindow, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 1), &trackComp, 0);

	//if (trackComp.rect.width<90 && trackComp.rect.y<200){
	//	trackWindow = trackComp.rect;
	//}
	//if (trackComp.rect.y>200)
	//{
	//	trackWindow = trackComp.rect;
	//}
	trackWindow = trackComp.rect;
}
/*
  Converts an image to 32-bit grayscale.

  @param img a 3-channel 8-bit color (BGR) or 8-bit gray image
  @return Returns a 32-bit grayscale image with values normalized to [0,1],
          or NULL when img is NULL. Caller owns the returned image.
*/
static IplImage* convert_to_gray32( IplImage* img )
{
	IplImage* gray8, * gray32;

	/* Guard against a NULL input (the sibling get_gray() already does). */
	if( !img )
		return NULL;

	gray32 = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );

	/* First reduce the input to an 8-bit single-channel image. */
	if( img->nChannels == 1 )
		/* already single-channel: just clone it */
		gray8 = cvClone( img );
	else
	{
		/* 3-channel input: convert BGR to gray */
		gray8 = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 );
		cvCvtColor( img, gray8, CV_BGR2GRAY );
	}

	/* Widen to 32-bit float, normalizing by 255. */
	cvConvertScale( gray8, gray32, 1.0 / 255.0, 0 );

	cvReleaseImage( &gray8 );  /* release the temporary image */
	return gray32;
}
void renderChainsWithBoxes(IplImage * SWTImage, std::vector<std::vector<Point2d> > & components, std::vector<Chain> & chains, std::vector<std::pair<Point2d, Point2d> > & compBB, std::vector<std::pair<CvPoint, CvPoint> > & bb, IplImage * output) { // keep track of included components std::vector<bool> included; included.reserve(components.size()); for (unsigned int i = 0; i != components.size(); i++) { included.push_back(false); } for (std::vector<Chain>::iterator it = chains.begin(); it != chains.end(); it++) { for (std::vector<int>::iterator cit = it->components.begin(); cit != it->components.end(); cit++) { included[*cit] = true; } } std::vector<std::vector<Point2d> > componentsRed; for (unsigned int i = 0; i != components.size(); i++) { if (included[i]) { componentsRed.push_back(components[i]); } } IplImage * outTemp = cvCreateImage(cvGetSize(output), IPL_DEPTH_32F, 1); LOGL(LOG_CHAINS, componentsRed.size() << " components after chaining"); renderComponents(SWTImage, componentsRed, outTemp); bb = findBoundingBoxes(chains, compBB, outTemp); IplImage * out = cvCreateImage(cvGetSize(output), IPL_DEPTH_8U, 1); cvConvertScale(outTemp, out, 255, 0); cvCvtColor(out, output, CV_GRAY2RGB); cvReleaseImage(&out); cvReleaseImage(&outTemp); }
//------------------------------------------------------------------------------ // Color Similarity Matrix Calculation //------------------------------------------------------------------------------ CvMat *colorsim(int nbins, double sigma) { CvMat *xc=cvCreateMat(1,nbins, CV_32FC1); CvMat *yr=cvCreateMat(nbins,1, CV_32FC1); CvMat *x=cvCreateMat(nbins,nbins, CV_32FC1); CvMat *y=cvCreateMat(nbins,nbins, CV_32FC1); CvMat *m=cvCreateMat(x->rows,x->rows, CV_32FC1); // Set x,y directions for (int j=0;j<nbins;j++) { cvSetReal2D(xc,0,j,(j+1-0.5)/nbins); cvSetReal2D(yr,j,0,(j+1-0.5)/nbins); } // Set u,v, meshgrids for (int i=0;i<x->rows;i++) { cvRepeat(xc,x); cvRepeat(yr,y); } CvMat *sub = cvCreateMat(x->rows,y->cols,CV_32FC1); cvSub(x,y,sub); cvAbs(sub,sub); cvMul(sub,sub,sub); cvConvertScale(sub,sub,-1.0/(2*sigma*sigma)); cvExp(sub,sub); cvSubRS(sub,cvScalar(1.0),m); cvReleaseMat(&xc); cvReleaseMat(&yr); cvReleaseMat(&x); cvReleaseMat(&y); cvReleaseMat(&sub); return m; }
//============================================================================ void AAM_IC::InverseCompose(const CvMat* dpq, const CvMat* s, CvMat* NewS) { // Firstly: Estimate the corresponding changes to the base mesh cvConvertScale(dpq, __inv_pq, -1); __shape.CalcShape(__inv_pq, __update_s0); // __update_s0 = N.W(s0, -delta_p, -delta_q) //Secondly: Composing the Incremental Warp with the Current Warp Estimate. double *S0 = __update_s0->data.db; double *S = s->data.db; double *SEst = NewS->data.db; double x, y, xw, yw; int k, tri_idx; int v1, v2, v3; const std::vector<std::vector<int> >& tri = __paw.__tri; const std::vector<std::vector<int> >& vtri = __paw.__vtri; for(int i = 0; i < __shape.nPoints(); i++) { x = 0.0; y = 0.0; k = 0; //The only problem with this approach is which triangle do we use? //In general there will be several triangles that share the i-th vertex. for(k = 0; k < vtri[i].size(); k++)// see Figure (11) { tri_idx = vtri[i][k]; v1 = tri[tri_idx][0]; v2 = tri[tri_idx][1]; v3 = tri[tri_idx][2]; AAM_PAW::Warp(S0[2*i],S0[2*i+1], __sMean[v1].x, __sMean[v1].y,__sMean[v2].x, __sMean[v2].y,__sMean[v3].x, __sMean[v3].y, xw, yw, S[2*v1], S[2*v1+1], S[2*v2], S[2*v2+1], S[2*v3], S[2*v3+1]); x += xw; y += yw; } // average the result so as to smooth the warp at each vertex SEst[2*i] = x/k; SEst[2*i+1] = y/k; } }
/*
 * Reconstructs an 8-bit 3-channel image from quantized transform
 * coefficients: for each plane, zero a 16-bit scratch image, dequantize that
 * plane's coefficients along its coefficient path, and apply the inverse
 * Walsh-Hadamard transform (iwht2d). The three 16-bit planes are merged and
 * narrowed to 8 bits. Caller owns the returned image.
 * NOTE(review): planes are named g,b,r here — presumably matching the
 * forward transform's channel order; confirm against the encoder.
 */
static IplImage* splat(int *coeffs, CvSize size, int *plane_coeffs)
{
    IplImage *g = cvCreateImage(size, IPL_DEPTH_16S, 1);
    IplImage *b = cvCreateImage(size, IPL_DEPTH_16S, 1);
    IplImage *r = cvCreateImage(size, IPL_DEPTH_16S, 1);
    IplImage *rgb = cvCreateImage(size, IPL_DEPTH_16S, 3);
    IplImage *img = cvCreateImage(size, IPL_DEPTH_8U, 3);
    IplImage *trans = cvCreateImage(size, IPL_DEPTH_16S, 1);
    // Total coefficient count per block; each plane reads its slice of coeffs.
    int dim = plane_coeffs[0] + plane_coeffs[1] + plane_coeffs[2];
    unsigned *order_p0 = build_path(plane_coeffs[0], KERNS);
    unsigned *order_p1 = build_path(plane_coeffs[1], KERNS);
    unsigned *order_p2 = build_path(plane_coeffs[2], KERNS);

    // Plane 0: dequantize into the zeroed scratch image, inverse-transform.
    memset(trans->imageData, 0, trans->imageSize);
    dequantize(trans, plane_coeffs[0], order_p0, KERNS, coeffs, dim);
    iwht2d(trans, g);

    // Plane 1 (coeffs offset past plane 0).
    memset(trans->imageData, 0, trans->imageSize);
    dequantize(trans, plane_coeffs[1], order_p1, KERNS, coeffs+plane_coeffs[0], dim);
    iwht2d(trans, b);

    // Plane 2 (coeffs offset past planes 0 and 1).
    memset(trans->imageData, 0, trans->imageSize);
    dequantize(trans, plane_coeffs[2], order_p2, KERNS,
               coeffs+plane_coeffs[0]+plane_coeffs[1], dim);
    iwht2d(trans, r);

    // Recombine planes and narrow 16-bit -> 8-bit.
    cvMerge(g, b, r, NULL, rgb);
    cvConvertScale(rgb, img, 1, 0);

    cvReleaseImage(&g);
    cvReleaseImage(&b);
    cvReleaseImage(&r);
    cvReleaseImage(&rgb);
    cvReleaseImage(&trans);
    free(order_p0);
    free(order_p1);
    free(order_p2);

    return img;
}
/*
 * Prepares one character cell of `image` for the categorization model:
 * crops to the character rect, computes a morphological gradient, equalizes
 * the histogram, bilateral-filters, and writes the result into `as_float`
 * normalized to [0,1]. The input image's ROI is restored before returning.
 */
DMZ_INTERNAL void prepare_image_for_cat(IplImage *image, IplImage *as_float, CharacterRectListIterator rect) {
  // Input image: IPL_DEPTH_8U [0 - 255]
  // Data for models: IPL_DEPTH_32F [0.0 - 1.0]
  cvSetImageROI(image, cvRect(rect->left, rect->top, kTrimmedCharacterImageWidth, kTrimmedCharacterImageHeight));

  // TODO: optimize this a lot!

  // Gradient: dilation minus erosion with a 3x3 cross kernel emphasizes
  // stroke edges.
  IplImage *filtered_image = cvCreateImage(cvSize(kTrimmedCharacterImageWidth, kTrimmedCharacterImageHeight), IPL_DEPTH_8U, 1);
  //llcv_morph_grad3_2d_cross_u8(image, filtered_image);
  IplConvKernel *kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
  cvMorphologyEx(image, filtered_image, NULL, kernel, CV_MOP_GRADIENT, 1);
  cvReleaseStructuringElement(&kernel);

  // Equalize to spread the gradient response across the full 8-bit range.
  llcv_equalize_hist(filtered_image, filtered_image);

  // Bilateral filter: edge-preserving smoothing before the model sees it.
  int aperture = 3;
  double space_sigma = (aperture / 2.0 - 1) * 0.3 + 0.8;
  double color_sigma = (aperture - 1) / 3.0;
  IplImage *smoothed_image = cvCreateImage(cvSize(kTrimmedCharacterImageWidth, kTrimmedCharacterImageHeight), IPL_DEPTH_8U, 1);
  cvSmooth(filtered_image, smoothed_image, CV_BILATERAL, aperture, aperture, space_sigma, color_sigma);

  // Convert to float in [0,1], the range the models expect.
  cvConvertScale(smoothed_image, as_float, 1.0f / 255.0f, 0);

  cvReleaseImage(&smoothed_image);
  cvReleaseImage(&filtered_image);

  cvResetImageROI(image);

#if DEBUG_EXPIRY_CATEGORIZATION_PERFORMANCE
  dmz_debug_timer_print("prepare image", 2);
#endif
}
/*
 * Demo entry point: builds an HSV color histogram from one or more reference
 * images, computes a per-pixel likelihood map over the input image, and
 * displays both the likelihood (rescaled to 8-bit by its maximum) and the
 * input until a key is pressed.
 * NOTE(review): in_img, ref_imgs and num_ref_imgs are globals populated by
 * arg_parse(); images are intentionally not released in this demo.
 */
int main( int argc, char** argv )
{
  IplImage* hsv_img;
  IplImage** hsv_ref_imgs;
  IplImage* l32f, * l;
  histogram* ref_histo;
  double max;
  int i;

  arg_parse( argc, argv );

  /* compute HSV histogram over all reference image */
  hsv_img = bgr2hsv( in_img );
  hsv_ref_imgs = (IplImage**)malloc( num_ref_imgs * sizeof( IplImage* ) );
  for( i = 0; i < num_ref_imgs; i++ )
    hsv_ref_imgs[i] = bgr2hsv( ref_imgs[i] );
  ref_histo = calc_histogram( hsv_ref_imgs, num_ref_imgs );
  normalize_histogram( ref_histo );

  /* compute likelihood at every pixel in input image */
  fprintf( stderr, "Computing likelihood... " );
  fflush( stderr );
  l32f = likelihood_image( hsv_img, ref_imgs[0]->width, ref_imgs[0]->height, ref_histo );
  fprintf( stderr, "done\n");

  /* convert likelihood image to uchar and display; scale so the maximum
     likelihood maps to 255 */
  cvMinMaxLoc( l32f, NULL, &max, NULL, NULL, NULL );
  l = cvCreateImage( cvGetSize( l32f ), IPL_DEPTH_8U, 1 );
  cvConvertScale( l32f, l, 255.0 / max, 0 );
  cvNamedWindow( "likelihood", 1 );
  cvShowImage( "likelihood", l );
  cvNamedWindow( "image", 1 );
  cvShowImage( "image", in_img );
  cvWaitKey(0);
}
/*
 * One CamShift tracking step on `frame`. When calc_hist is true the hue
 * histogram is (re)built from `selection` first; otherwise the existing
 * histogram is back-projected and cvCamShift updates the track window.
 * On return `selection` is recentered on the tracked object (size kept).
 * Side effects: mutates members _hsv, _hue, _mask, _hist, _backproject,
 * _track_window, _track_comp, _track_box.
 */
void CamShift::Track(IplImage *frame, CvRect &selection, bool calc_hist)
{
    int i, bin_w, c;

    cvCvtColor( frame, _hsv, CV_BGR2HSV );

    // Mask out pixels with unusable saturation/value.
    cvInRangeS( _hsv, cvScalar(0,_smin,MIN(_vmin,_vmax),0),
                cvScalar(180,256,MAX(_vmin,_vmax),0), _mask );
    cvSplit( _hsv, _hue, 0, 0, 0 );

    if(calc_hist)
    {
        // Build the hue histogram over the selection, normalized to [0,255].
        float max_val = 0.f;
        cvSetImageROI( _hue, selection );
        cvSetImageROI( _mask, selection );
        cvCalcHist( &_hue, _hist, 0, _mask );
        cvGetMinMaxHistValue( _hist, 0, &max_val, 0, 0 );
        cvConvertScale( _hist->bins, _hist->bins, max_val ? 255. / max_val : 0., 0 );
        cvResetImageROI( _hue );
        cvResetImageROI( _mask );
        _track_window = selection;
    }

    // Probability map of the tracked hue, then CamShift around the window.
    cvCalcBackProject( &_hue, _backproject, _hist );
    cvAnd( _backproject, _mask, _backproject, 0 );
    cvCamShift( _backproject, _track_window,
                cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
                &_track_comp, &_track_box );
    _track_window = _track_comp.rect;

    // Bottom-origin images report a mirrored angle.
    if( frame->origin )
        _track_box.angle = -_track_box.angle;

    // Recenter the caller's selection on the tracked box, keeping its size.
    selection = cvRect(_track_box.center.x-_track_box.size.width/2,
                       _track_box.center.y-_track_box.size.height/2,
                       selection.width, selection.height);
}
/*
 * Computes the full DFT-based cross-correlation/convolution of the INVERTED
 * image with the INVERTED filter (both 8-bit, single channel), returning a
 * 32-bit float image of the input image's size. Both inputs are normalized
 * by 1/255 and zero-padded to the optimal DFT size before transforming.
 * Caller owns the returned image.
 * NOTE(review): the cvDFT nonzero_rows hints (image->height, filter->height,
 * res->height) assume the padded regions stay zero — confirm if editing.
 */
static IplImage* get_convolution (const IplImage *image, const IplImage *filter)
{
    CvSize dft_size;
    IplImage *reversed_image, *reversed_filter;
    IplImage *dft_image, *dft_filter, *dft_res;
    IplImage *res;

    // Padded size large enough for full (linear, not circular) correlation.
    dft_size.height = cvGetOptimalDFTSize(image->height + filter->height - 1);
    dft_size.width = cvGetOptimalDFTSize(image->width + filter->width - 1);

    res = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_32F, N_CHANNELS_GRAY);

    // Invert both inputs so dark structures correlate strongly.
    reversed_image = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, N_CHANNELS_GRAY);
    reversed_filter = cvCreateImage(cvGetSize(filter), IPL_DEPTH_8U, N_CHANNELS_GRAY);
    cvNot(image, reversed_image);
    cvNot(filter, reversed_filter);

    // Zero-filled padded float buffers; the data lands in the top-left ROI.
    dft_image = cvCreateImage(dft_size, IPL_DEPTH_32F, N_CHANNELS_GRAY);
    cvSet(dft_image, cvScalar(0, 0, 0, 0), NULL);
    dft_filter = cvCreateImage(dft_size, IPL_DEPTH_32F, N_CHANNELS_GRAY);
    cvSet(dft_filter, cvScalar(0, 0, 0, 0), NULL);

    cvSetImageROI(dft_image, cvRect(0, 0, reversed_image->width, reversed_image->height));
    cvSetImageROI(dft_filter, cvRect(0, 0, reversed_filter->width, reversed_filter->height));

    // Normalize 8-bit values into [0,1] while copying into the padded buffers.
    double scaling_factor = 1.0/255;
    cvConvertScale(reversed_image, dft_image, scaling_factor, 0);
    cvConvertScale(reversed_filter, dft_filter, scaling_factor, 0);

    cvResetImageROI(dft_image);
    cvResetImageROI(dft_filter);

    // Forward transforms; nonzero_rows limits work to the populated rows.
    cvDFT(dft_image, dft_image, CV_DXT_FORWARD, image->height);
    cvDFT(dft_filter, dft_filter, CV_DXT_FORWARD, filter->height);

    // Pointwise spectrum product == spatial convolution, then invert.
    dft_res = cvCreateImage(dft_size, IPL_DEPTH_32F, N_CHANNELS_GRAY);
    cvMulSpectrums(dft_image, dft_filter, dft_res, 0);
    cvDFT(dft_res, dft_res, CV_DXT_INVERSE, res->height);

    // Crop the valid top-left region back to the input image size.
    cvSetImageROI(dft_res, cvRect(0, 0, res->width, res->height));
    cvCopy(dft_res, res, NULL);
    cvResetImageROI(dft_res);

    cvReleaseImage(&reversed_filter);
    cvReleaseImage(&reversed_image);
    cvReleaseImage(&dft_image);
    cvReleaseImage(&dft_filter);
    cvReleaseImage(&dft_res);

    return res;
}
/*
 * Block-wise DFT cross-correlation of `_img` with `_templ`, written into
 * `_corr` (the core of cvMatchTemplate / cvFilter2D). The correlation is
 * computed tile-by-tile: each tile of the output is obtained by forward-DFT
 * of the corresponding padded image patch, multiplication with the
 * conjugated template spectrum, and inverse DFT. Multi-channel inputs are
 * split into planes; results are either merged (multi-channel output) or
 * accumulated (single-channel output). `delta` is added to the result
 * (single-channel correlation only, see the CV_Assert), `anchor` offsets the
 * patch relative to the output, and `borderType` fills out-of-image margins.
 * Tiles may be processed in parallel via OpenMP where enabled.
 */
void icvCrossCorr( const CvArr* _img, const CvArr* _templ, CvArr* _corr,
                   CvPoint anchor, double delta, int borderType )
{
    // disable OpenMP in the case of Visual Studio,
    // otherwise the performance drops significantly
#undef USE_OPENMP
#if !defined _MSC_VER || defined CV_ICC
    #define USE_OPENMP 1
#endif

    const double block_scale = 4.5;
    const int min_block_size = 256;
    cv::Ptr<CvMat> dft_img[CV_MAX_THREADS];   // per-thread DFT scratch
    cv::Ptr<CvMat> dft_templ;                 // stacked template spectra (one per plane)
    std::vector<uchar> buf[CV_MAX_THREADS];   // per-thread conversion buffers
    int k, num_threads = 0;

    CvMat istub, *img = (CvMat*)_img;
    CvMat tstub, *templ = (CvMat*)_templ;
    CvMat cstub, *corr = (CvMat*)_corr;
    CvSize dftsize, blocksize;
    int depth, templ_depth, corr_depth, max_depth = CV_32F,
        cn, templ_cn, corr_cn, buf_size = 0,
        tile_count_x, tile_count_y, tile_count;

    img = cvGetMat( img, &istub );
    templ = cvGetMat( templ, &tstub );
    corr = cvGetMat( corr, &cstub );

    // ---- format validation ----
    if( CV_MAT_DEPTH( img->type ) != CV_8U &&
        CV_MAT_DEPTH( img->type ) != CV_16U &&
        CV_MAT_DEPTH( img->type ) != CV_32F &&
        CV_MAT_DEPTH( img->type ) != CV_64F )
        CV_Error( CV_StsUnsupportedFormat, "The function supports only 8u, 16u and 32f data types" );

    if( !CV_ARE_DEPTHS_EQ( img, templ ) && CV_MAT_DEPTH( templ->type ) != CV_32F )
        CV_Error( CV_StsUnsupportedFormat, "Template (kernel) must be of the same depth as the input image, or be 32f" );

    if( !CV_ARE_DEPTHS_EQ( img, corr ) && CV_MAT_DEPTH( corr->type ) != CV_32F &&
        CV_MAT_DEPTH( corr->type ) != CV_64F )
        CV_Error( CV_StsUnsupportedFormat, "The output image must have the same depth as the input image, or be 32f/64f" );

    if( (!CV_ARE_CNS_EQ( img, corr ) || CV_MAT_CN(templ->type) > 1) &&
        (CV_MAT_CN( corr->type ) > 1 || !CV_ARE_CNS_EQ( img, templ)) )
        CV_Error( CV_StsUnsupportedFormat,
            "The output must have the same number of channels as the input (when the template has 1 channel), "
            "or the output must have 1 channel when the input and the template have the same number of channels" );

    depth = CV_MAT_DEPTH(img->type);
    cn = CV_MAT_CN(img->type);
    templ_depth = CV_MAT_DEPTH(templ->type);
    templ_cn = CV_MAT_CN(templ->type);
    corr_depth = CV_MAT_DEPTH(corr->type);
    corr_cn = CV_MAT_CN(corr->type);
    CV_Assert( corr_cn == 1 || delta == 0 );

    // Working depth: widest of the three, promoted to 64f for >8u inputs.
    max_depth = MAX( max_depth, templ_depth );
    max_depth = MAX( max_depth, depth );
    max_depth = MAX( max_depth, corr_depth );
    if( depth > CV_8U )
        max_depth = CV_64F;

    /*if( img->cols < templ->cols || img->rows < templ->rows )
        CV_Error( CV_StsUnmatchedSizes, "Such a combination of image and template/filter size is not supported" );*/

    if( corr->rows > img->rows + templ->rows - 1 ||
        corr->cols > img->cols + templ->cols - 1 )
        CV_Error( CV_StsUnmatchedSizes, "output image should not be greater than (W + w - 1)x(H + h - 1)" );

    // ---- choose tile size: ~block_scale x template, clamped ----
    blocksize.width = cvRound(templ->cols*block_scale);
    blocksize.width = MAX( blocksize.width, min_block_size - templ->cols + 1 );
    blocksize.width = MIN( blocksize.width, corr->cols );
    blocksize.height = cvRound(templ->rows*block_scale);
    blocksize.height = MAX( blocksize.height, min_block_size - templ->rows + 1 );
    blocksize.height = MIN( blocksize.height, corr->rows );

    dftsize.width = cvGetOptimalDFTSize(blocksize.width + templ->cols - 1);
    if( dftsize.width == 1 )
        dftsize.width = 2;
    dftsize.height = cvGetOptimalDFTSize(blocksize.height + templ->rows - 1);
    if( dftsize.width <= 0 || dftsize.height <= 0 )
        CV_Error( CV_StsOutOfRange, "the input arrays are too big" );

    // recompute block size to exactly fill the chosen DFT size
    blocksize.width = dftsize.width - templ->cols + 1;
    blocksize.width = MIN( blocksize.width, corr->cols );
    blocksize.height = dftsize.height - templ->rows + 1;
    blocksize.height = MIN( blocksize.height, corr->rows );

    // One spectrum slot per template plane, stacked vertically.
    dft_templ = cvCreateMat( dftsize.height*templ_cn, dftsize.width, max_depth );

#ifdef USE_OPENMP
    num_threads = cvGetNumThreads();
#else
    num_threads = 1;
#endif

    for( k = 0; k < num_threads; k++ )
        dft_img[k] = cvCreateMat( dftsize.height, dftsize.width, max_depth );

    // Scratch buffer sized for the largest depth-conversion any stage needs.
    if( templ_cn > 1 && templ_depth != max_depth )
        buf_size = templ->cols*templ->rows*CV_ELEM_SIZE(templ_depth);

    if( cn > 1 && depth != max_depth )
        buf_size = MAX( buf_size, (blocksize.width + templ->cols - 1)*
            (blocksize.height + templ->rows - 1)*CV_ELEM_SIZE(depth));

    if( (corr_cn > 1 || cn > 1) && corr_depth != max_depth )
        buf_size = MAX( buf_size, blocksize.width*blocksize.height*CV_ELEM_SIZE(corr_depth));

    if( buf_size > 0 )
    {
        for( k = 0; k < num_threads; k++ )
            buf[k].resize(buf_size);
    }

    // compute DFT of each template plane
    for( k = 0; k < templ_cn; k++ )
    {
        CvMat dstub, *src, *dst, temp;
        CvMat* planes[] = { 0, 0, 0, 0 };
        int yofs = k*dftsize.height;

        src = templ;
        dst = cvGetSubRect( dft_templ, &dstub, cvRect(0,yofs,templ->cols,templ->rows));

        if( templ_cn > 1 )
        {
            // Extract plane k, converting depth via the scratch buffer if needed.
            planes[k] = templ_depth == max_depth ? dst :
                cvInitMatHeader( &temp, templ->rows, templ->cols, templ_depth, &buf[0][0] );
            cvSplit( templ, planes[0], planes[1], planes[2], planes[3] );
            src = planes[k];
            planes[k] = 0;
        }

        if( dst != src )
            cvConvert( src, dst );

        // Zero the horizontal padding right of the template data.
        if( dft_templ->cols > templ->cols )
        {
            cvGetSubRect( dft_templ, dst, cvRect(templ->cols, yofs,
                dft_templ->cols - templ->cols, templ->rows) );
            cvZero( dst );
        }
        cvGetSubRect( dft_templ, dst, cvRect(0,yofs,dftsize.width,dftsize.height) );
        cvDFT( dst, dst, CV_DXT_FORWARD + CV_DXT_SCALE, templ->rows );
    }

    tile_count_x = (corr->cols + blocksize.width - 1)/blocksize.width;
    tile_count_y = (corr->rows + blocksize.height - 1)/blocksize.height;
    tile_count = tile_count_x*tile_count_y;

#if defined _OPENMP && defined USE_OPENMP
#pragma omp parallel for num_threads(num_threads) schedule(dynamic)
#endif
    // calculate correlation by blocks
    for( k = 0; k < tile_count; k++ )
    {
#ifdef USE_OPENMP
        int thread_idx = cvGetThreadNum();
#else
        int thread_idx = 0;
#endif
        int x = (k%tile_count_x)*blocksize.width;
        int y = (k/tile_count_x)*blocksize.height;
        int i, yofs;
        CvMat sstub, dstub, *src, *dst, temp;
        CvMat* planes[] = { 0, 0, 0, 0 };
        CvMat* _dft_img = dft_img[thread_idx];
        uchar* _buf = buf_size > 0 ? &buf[thread_idx][0] : 0;
        CvSize csz = { blocksize.width, blocksize.height }, isz;

        // Source patch bounds (x0,y0)..(x2,y2), clipped to the image;
        // (x1,y1) is the clipped top-left.
        int x0 = x - anchor.x, y0 = y - anchor.y;
        int x1 = MAX( 0, x0 ), y1 = MAX( 0, y0 ), x2, y2;
        csz.width = MIN( csz.width, corr->cols - x );
        csz.height = MIN( csz.height, corr->rows - y );
        isz.width = csz.width + templ->cols - 1;
        isz.height = csz.height + templ->rows - 1;
        x2 = MIN( img->cols, x0 + isz.width );
        y2 = MIN( img->rows, y0 + isz.height );

        for( i = 0; i < cn; i++ )
        {
            CvMat dstub1, *dst1;
            yofs = i*dftsize.height;

            src = cvGetSubRect( img, &sstub, cvRect(x1,y1,x2-x1,y2-y1) );
            dst = cvGetSubRect( _dft_img, &dstub, cvRect(0,0,isz.width,isz.height) );
            dst1 = dst;

            // If the patch was clipped, write into the interior and let
            // cvCopyMakeBorder synthesize the margins below.
            if( x2 - x1 < isz.width || y2 - y1 < isz.height )
                dst1 = cvGetSubRect( _dft_img, &dstub1,
                    cvRect( x1 - x0, y1 - y0, x2 - x1, y2 - y1 ));

            if( cn > 1 )
            {
                planes[i] = dst1;
                if( depth != max_depth )
                    planes[i] = cvInitMatHeader( &temp, y2 - y1, x2 - x1, depth, _buf );
                cvSplit( src, planes[0], planes[1], planes[2], planes[3] );
                src = planes[i];
                planes[i] = 0;
            }

            if( dst1 != src )
                cvConvert( src, dst1 );

            if( dst != dst1 )
                cvCopyMakeBorder( dst1, dst, cvPoint(x1 - x0, y1 - y0), borderType );

            // Zero the horizontal DFT padding.
            if( dftsize.width > isz.width )
            {
                cvGetSubRect( _dft_img, dst, cvRect(isz.width, 0,
                    dftsize.width - isz.width,dftsize.height) );
                cvZero( dst );
            }

            // Correlation: F(img) * conj(F(templ)), then inverse transform.
            cvDFT( _dft_img, _dft_img, CV_DXT_FORWARD, isz.height );
            cvGetSubRect( dft_templ, dst,
                cvRect(0,(templ_cn>1?yofs:0),dftsize.width,dftsize.height) );

            cvMulSpectrums( _dft_img, dst, _dft_img, CV_DXT_MUL_CONJ );
            cvDFT( _dft_img, _dft_img, CV_DXT_INVERSE, csz.height );

            src = cvGetSubRect( _dft_img, &sstub, cvRect(0,0,csz.width,csz.height) );
            dst = cvGetSubRect( corr, &dstub, cvRect(x,y,csz.width,csz.height) );

            if( corr_cn > 1 )
            {
                // Multi-channel output: convert plane i and merge.
                planes[i] = src;
                if( corr_depth != max_depth )
                {
                    planes[i] = cvInitMatHeader( &temp, csz.height, csz.width,
                                                 corr_depth, _buf );
                    cvConvertScale( src, planes[i], 1, delta );
                }
                cvMerge( planes[0], planes[1], planes[2], planes[3], dst );
                planes[i] = 0;
            }
            else
            {
                // Single-channel output: first plane initializes (with delta),
                // remaining planes accumulate.
                if( i == 0 )
                    cvConvertScale( src, dst, 1, delta );
                else
                {
                    if( max_depth > corr_depth )
                    {
                        cvInitMatHeader( &temp, csz.height, csz.width,
                                         corr_depth, _buf );
                        cvConvert( src, &temp );
                        src = &temp;
                    }
                    cvAcc( src, dst );
                }
            }
        }
    }
}
/*
 * Interactive trimap-editing GUI loop. Shows the working image and the user
 * trimap in two windows, lets the user paint strokes with the mouse and
 * drive the editor with keys:
 *   'q' quit, 'w' cycle stroke type, 'a'/'d' grow/shrink stroke width,
 *   'u'/'i' raise/lower the flash-matting threshold T (when a flash-only
 *   image is available).
 * NOTE(review): relies on file-level state (img_gui, usr, flashOnlyImg_gui,
 * stroketype, strokewidth, T, T_step, _stroke* constants, DrawStroke,
 * RenderMsg); the trimap `usr` is edited in place by the mouse callback.
 */
void run()
{
	int key;
	IplImage *mask = cvCreateImage( cvGetSize(img_gui), 8, 1 );
	IplImage *display = cvCreateImage( cvGetSize(img_gui), 8, 3 );
	// Solid-color overlays used to tint foreground/background strokes.
	IplImage *fgcolor = cvCloneImage( img_gui), *bgcolor = cvCloneImage( img_gui);
	cvSet( fgcolor, cvScalar( 255, 0, 0)), cvSet( bgcolor, cvScalar( 0, 255, 255));

	// gui: stroke-width limits and step derived from the image size
	int swmin = _swmin;
	int swmax = std::max( std::max( img_gui->width, img_gui->height) / 8, 1);
	int swstep = std::max( std::min( img_gui->width, img_gui->height) / 100, 1);
	int swdefault = std::min( std::max( std::min( img_gui->width, img_gui->height) / 32, swmin), swmax);
	strokewidth = swdefault;

	RenderMsg( display);
	cvNamedWindow( "working space" );
	cvNamedWindow( "trimap" );
	cvShowImage( "working space" , display );
	cvShowImage( "trimap" , usr);
	cvSetMouseCallback( "working space", DrawStroke);
	cvSetMouseCallback( "trimap", DrawStroke);

	while(1)
	{
		key = cvWaitKey(5);
		if(key=='q')
			break;
		else if(key=='w')
		{
			// cycle through the three stroke types
			stroketype++;
			stroketype = stroketype % 3;
			printf("%d\n", stroketype);
		}
		else if( key == 'd') // decrease stroke width
			strokewidth = ( strokewidth - swstep < swmin) ? swmin : strokewidth - swstep;
		else if( key == 'a') // increase stroke width
			strokewidth = ( strokewidth + swstep > swmax) ? swmax : strokewidth + swstep;
		else if( key == 'u' && flashOnlyImg_gui!=NULL )
		{
			// regenerate the trimap with a higher threshold
			T += T_step;
			FlashMatting::GenerateTrimap( flashOnlyImg_gui, usr, T);
		}
		else if( key == 'i' && flashOnlyImg_gui!=NULL )
		{
			// regenerate the trimap with a lower threshold
			T -= T_step;
			FlashMatting::GenerateTrimap( flashOnlyImg_gui, usr, T);
		}

		// display: tint background strokes, then foreground strokes, then
		// darken the whole composite for contrast.
		cvCopy( img_gui, display );
		cvCmpS( usr, _strokeColor[_strokebg], mask, CV_CMP_EQ);
		cvOr( img_gui, bgcolor, display, mask);
		cvCmpS( usr, _strokeColor[_strokefg], mask, CV_CMP_EQ);
		cvOr( img_gui, fgcolor, display, mask);
		cvConvertScale( display, display, 0.7);
		//cvCmpS( usr, _strokeColor[_strokeu], mask, CV_CMP_EQ);
		//cvCopy( img_gui, display, mask);
		RenderMsg( display);
		cvShowImage( "working space", display);
		cvShowImage( "trimap" , usr);
	}

	cvReleaseImage( &display );
	cvDestroyAllWindows();
}
/*
 * Detects a face in the current camera frame with the Haar cascade given by
 * `faceData` name (the cascade itself lives in d->mCascade) and, on the
 * first hit, initializes the CamShift tracking state: allocates the
 * HSV/hue/mask/probability images, builds the normalized hue histogram over
 * the detected rectangle and stores it for subsequent trackFace() calls.
 * Always returns an empty QRect; results are communicated via d->faceRect.
 * NOTE(review): the allocated images/histogram are re-created on every first
 * detection and `faceData` is unused here — presumably the cascade was
 * loaded from it earlier; confirm. `updateHugeImage` looks like a typo for
 * "updateHueImage" — it is an external helper, so the name is kept.
 */
QRect WebCamData::detectFace(const char* faceData)
{
    // Already locked on: just keep tracking.
    if (d->hasFace) {
        trackFace();
        return QRect();
    }

    if (!d->mCascade) {
        qDebug() << Q_FUNC_INFO << ": " << "Error incorrect Haar classifier cascade";
        return QRect();
    }

    CvRect* rect = 0;
    // Ignore faces smaller than a fifth of the frame width.
    int faceSize = d->data->width / 5;
    d->mFaceSeq = cvHaarDetectObjects(d->data, d->mCascade, d->mFaceStore, 1.1, 6,
                                      CV_HAAR_DO_CANNY_PRUNING,
                                      cvSize(faceSize, faceSize));
    // qDebug() << "Number of Faces Detected" << d->mFaceSeq->total;

    if (d->mFaceSeq && d->mFaceSeq->total) {
        rect = (CvRect*) cvGetSeqElem(d->mFaceSeq, 0);
        d->hasFace = true;

        int radius = cvRound((rect->width + rect->height) * 0.25);
        CvPoint center;
        center.x = cvRound(rect->x + rect->width * 0.5);
        center.y = cvRound(rect->y + rect->height * 0.5);
        //qDebug() << "Radius : " << radius << " X: " << center.x << "Y : " << center.y;

        //histogram: allocate tracking images and build the normalized hue
        //histogram over the detected face rectangle
        float max = 0.f;
        float range[] = {0, 180};
        float* ranges = range;
        int bins = 30;

        d->hsvImage = cvCreateImage(cvGetSize(d->data), 8, 3);
        d->hueImage = cvCreateImage(cvGetSize(d->data), 8, 1);
        d->mask = cvCreateImage(cvGetSize(d->data), 8, 1);
        d->prob = cvCreateImage(cvGetSize(d->data), 8, 1);
        d->histogram = cvCreateHist(1, &bins, CV_HIST_ARRAY, &ranges, 1);

        updateHugeImage(d->data);

        cvSetImageROI(d->hueImage, *rect);
        cvSetImageROI(d->mask, *rect);
        cvCalcHist(&d->hueImage, d->histogram, 0, d->mask);
        cvGetMinMaxHistValue(d->histogram, 0, &max, 0, 0);
        // Scale bins so the largest becomes 255.
        cvConvertScale(d->histogram->bins, d->histogram->bins,
                       max ? 255.0 / max : 0, 0);
        cvResetImageROI(d->hueImage);
        cvResetImageROI(d->mask);

        d->faceRect = *rect;
        /* */
    }

    return QRect();
}
bool photometric_calibration(CalibModel &model, CvCapture *capture, int nbImages, bool cache) { if (cache) model.map.load(); const char *win = "BazAR"; IplImage*gray=0; cvNamedWindow(win, CV_WINDOW_AUTOSIZE); cvNamedWindow("LightMap", CV_WINDOW_AUTOSIZE); IplImage* frame = 0; IplImage* display=cvCloneImage(cvQueryFrame(capture)); int nbHomography =0; LightCollector lc(model.map.reflc); IplImage *lightmap = cvCreateImage(cvGetSize(model.map.map.getIm()), IPL_DEPTH_8U, lc.avgChannels); while (1) { // acquire image frame = cvQueryFrame( capture ); /* if (frame) cvReleaseImage(&frame); frame = cvLoadImage("model.bmp",1); */ if( !frame ) break; // convert it to gray levels, if required if (frame->nChannels >1) { if( !gray ) gray = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 ); cvCvtColor(frame, gray, CV_RGB2GRAY); } else { gray = frame; } // run the detector if (model.detector.detect(gray)) { // 2d homography found nbHomography++; // Computes 3D pose and surface normal model.augm.Clear(); add_detected_homography(model.detector, model.augm); model.augm.Accomodate(4, 1e-4); CvMat *mat = model.augm.GetObjectToWorld(); float normal[3]; for (int j=0;j<3;j++) normal[j] = cvGet2D(mat, j, 2).val[0]; cvReleaseMat(&mat); // average pixels over triangles lc.averageImage(frame,model.detector.H); // add observations if (!model.map.isReady()) model.map.addNormal(normal, lc, 0); if (!model.map.isReady() && nbHomography >= nbImages) { if (model.map.computeLightParams()) { model.map.save(); const float *gain = model.map.getGain(0); const float *bias = model.map.getBias(0); cout << "Gain: " << gain[0] << ", " << gain[1] << ", " << gain[2] << endl; cout << "Bias: " << bias[0] << ", " << bias[1] << ", " << bias[2] << endl; } } } if (model.map.isReady()) { double min, max; IplImage *map = model.map.map.getIm(); cvSetImageCOI(map, 2); cvMinMaxLoc(map, &min, &max); cvSetImageCOI(map, 0); assert(map->nChannels == lightmap->nChannels); cvConvertScale(map, lightmap, 128, 0); 
cvShowImage("LightMap", lightmap); augment_scene(model, frame, display); } else { cvCopy(frame,display); if (model.detector.object_is_detected) lc.drawGrid(display, model.detector.H); } cvShowImage(win, display); int k=cvWaitKey(10); if (k=='q' || k== 27) break; } cvReleaseImage(&lightmap); cvReleaseImage(&display); if (frame->nChannels > 1) cvReleaseImage(&gray); return 0; }
// High-pass filter of the gray input image via the DCT:
// forward DCT -> zero the block of coefficients with both normalized row and
// column frequency above `rad` -> inverse DCT -> scale -> subtract the
// low-pass result from the input -> add a constant offset.
// Depending on StdDCT_output.curitem, an intermediate stage is written back
// into cvImGray for inspection instead of only the final result.
// Operates entirely on module-level image globals.
void StdDCT() {
    allocateImages();
    // Lazily allocate the three 32-bit float work buffers (same size as input).
    if(!cvIm32F) {
        cvIm32Fin = cvCreateImage(cvGetSize(cvImGray), IPL_DEPTH_32F, 1);
        cvIm32F = cvCreateImage(cvGetSize(cvImGray), IPL_DEPTH_32F, 1);
        cvImDCT = cvCreateImage(cvGetSize(cvImGray), IPL_DEPTH_32F, 1);
    }
    // Keep a pristine float copy of the input (cvIm32Fin) for the final subtraction.
    cvConvertScale(cvImGray, cvIm32Fin);
    cvCopy(cvIm32Fin, cvIm32F);
    cvDCT(cvIm32F, cvImDCT, CV_DXT_FORWARD);

    /* OUTPUTS * char * LogDCT_outputnames_list[] = { "LogDCT", "Log DCT Cropped", "Log DCT Inv", "DCT Inv", "Final"}; */
    if(StdDCT_output.curitem == 0) // "StdDCT": show raw forward-DCT coefficients
    {
        cvConvertScale(cvImDCT, cvImGray, 1.);
    }

    // Reduce low DCT: zero every coefficient whose normalized row AND column
    // index both exceed rad (StdDCT_radius is a percentage).
    float rad = (float)StdDCT_radius / 100.f;
    for(int r = 0; r<cvImDCT->height; r++) {
        float fr = (float)r / (float)cvImDCT->height;
        if(fr > rad) {
            float * line = (float *)(cvImDCT->imageData + r*cvImDCT->widthStep);
            for(int c = 0; c<cvImDCT->width; c++) {
                float fc = (float)c / (float)cvImDCT->width;
                // float dc = fc*fc;
                // float dr = fr*fr;
                if(fc > rad) {
                    line[c] = 0;
                }
            }
        }
    }
    if(StdDCT_output.curitem == 1) // "Log DCT Cropped": coefficients after zeroing
    {
        cvConvertScale(cvImDCT, cvImGray, 1.);
    }

    // Back to the spatial domain: cvIm32F now holds the low-pass image.
    cvDCT(cvImDCT, cvIm32F, CV_DXT_INVERSE);
    if(StdDCT_output.curitem == 2) // "StdDCT Inv"
    {
        cvConvertScale(cvIm32F, cvImGray, 1.);
    }

    // Scale the low-pass image by LogDCT_coef before subtracting it.
    cvConvertScale(cvIm32F, cvImDCT, LogDCT_coef);
    if(StdDCT_output.curitem == 3) // "DCT Inv"
    {
        cvConvertScale(cvImDCT, cvImGray, 1.);
    }

    // Substract low pass image from input image -> high-pass residue.
    cvSub(cvIm32Fin, cvImDCT, cvIm32F);
    if(StdDCT_output.curitem == 4) // "DCT Inv - scal"
    {
        cvConvertScale(cvIm32F, cvImGray, 1.);
    }

    // Re-center the residue by adding the constant LogDCT_add offset.
    cvAddS(cvIm32F, cvScalarAll(LogDCT_add), cvImDCT);
    if(StdDCT_output.curitem == 5) // "Out"
    {
        cvConvertScale(cvImDCT, cvImGray, 1.);
    }
    finishImages();
}
// Main periodic callback of the module.
// Grabs Kinect data (depth, player map, RGB, skeletons), republishes it on
// the module's YARP ports, optionally displays it locally, handles
// click-driven kinect<->iCub calibration, and pushes tracked agents into the
// OPC once calibration is available. Always returns true (keep running).
bool AgentDetector::updateModule()
{
    LockGuard lg(m);

    // ---- acquire sensor data -------------------------------------------
    bool isRefreshed = client.getDepthAndPlayers(depth,players);
    client.getRgb(rgb);

    bool tracked;
    if (handleMultiplePlayers)
        tracked=client.getJoints(joints);
    else
        tracked=client.getJoints(joint, EFAA_KINECT_CLOSEST_PLAYER);
    //cout<<"Tracking value = "<<tracked<<endl;

    if (tracked)
    {
        if (handleMultiplePlayers)
            client.getSkeletonImage(joints,skeletonImage);
        else
        {
            // Single-player mode: normalize to a one-element joints deque so
            // the processing loop below works uniformly.
            client.getSkeletonImage(joint,skeletonImage);
            joints.clear();
            joints.push_back(joint);
        }
    }

    client.getPlayersImage(players,playersImage);
    client.getDepthImage(depth,depthToDisplay);

    // ---- republish on YARP ports (only when someone is connected) -------
    if (depthPort.getOutputCount()>0)
    {
        depthPort.prepare()=depthToDisplay;
        depthPort.write();
    }
    if (imagePort.getOutputCount()>0)
    {
        imagePort.prepare()=rgb;
        imagePort.write();
    }
    if (playersPort.getOutputCount()>0)
    {
        playersPort.prepare()=playersImage;
        playersPort.write();
    }
    if (skeletonPort.getOutputCount()>0)
    {
        skeletonPort.prepare()=skeletonImage;
        skeletonPort.write();
    }

    // ---- local visualization --------------------------------------------
    if (showImages)
    {
        // Depth scaled down for display; RGB converted for highgui.
        cvConvertScale((IplImage*)depthToDisplay.getIplImage(),depthTmp,1.0/255);
        cvCvtColor((IplImage*)rgb.getIplImage(),rgbTmp,CV_BGR2RGB);

        // showMode is a list of sub-modes consumed token by token by
        // showImageParser until the string is empty.
        string mode=showMode;
        string submode;
        while (!mode.empty())
        {
            if (showImageParser(mode,submode))
            {
                if (submode=="rgb")
                    cvShowImage("rgb",rgbTmp);
                else if (submode=="depth")
                    cvShowImage("depth",depthTmp);
                else if (submode=="skeleton")
                    cvShowImage("skeleton",(IplImage*)skeletonImage.getIplImage());
                else if (submode=="players")
                    cvShowImage("players",(IplImage*)playersImage.getIplImage());
                else
                    yError("unrecognized show mode!");
            }
        }
        cvWaitKey(1);
    }

    //Send the players information to the OPC
    //Allow click calibration
    if (!checkCalibration())
    {
        // Left click: pair the clicked Kinect 3D point with the position of
        // the reference object found in the OPC and send the pair to RFH.
        if (AgentDetector::clicked==clicked_left)
        {
            AgentDetector::clicked=idle;

            //Get the clicked point coordinate in Kinect space
            Vector clickedPoint(3);
            cout<<"Processing a click on ("<<AgentDetector::clickX<<" "<<AgentDetector::clickY<<") --> ";
            client.get3DPoint((int)AgentDetector::clickX,(int)AgentDetector::clickY,clickedPoint);
            cout<<clickedPoint.toString(3,3)<<endl;

            // OPC query: present entities that are objects or rt-objects.
            Bottle bCond;
            Bottle bObject;
            Bottle bRTObject;
            bObject.addString(EFAA_OPC_ENTITY_TAG);
            bObject.addString("==");
            bObject.addString(EFAA_OPC_ENTITY_OBJECT);
            bRTObject.addString(EFAA_OPC_ENTITY_TAG);
            bRTObject.addString("==");
            bRTObject.addString(EFAA_OPC_ENTITY_RTOBJECT);
            Bottle bPresent;
            bPresent.addString(EFAA_OPC_OBJECT_PRESENT_TAG);
            bPresent.addString("==");
            bPresent.addDouble(1.0);
            bCond.addList()=bObject;
            bCond.addString("&&");
            bCond.addList()=bPresent;
            bCond.addString("||");
            bCond.addList()=bRTObject;
            bCond.addString("&&");
            bCond.addList()=bPresent;
            opc->checkout();
            opc->isVerbose=true;
            list<Entity*> presentObjects=opc->Entities(bCond);
            opc->isVerbose=false;

            // Use the single present object, or — when several are present —
            // the one explicitly named "target".
            Object *o=nullptr;
            if (presentObjects.size()==1)
            {
                o=dynamic_cast<Object*>(presentObjects.front());
            }
            else
            {
                for(auto& presentObject : presentObjects) {
                    if(presentObject->name() == "target") {
                        o=dynamic_cast<Object*>(presentObject);
                        break;
                    }
                }
            }
            if(o)
            {
                // Send the (kinect point, icub point) pair to the reference
                // frame handler.
                Bottle botRPH, botRPHRep;
                botRPH.addString("add");
                botRPH.addString("kinect");
                Bottle &cooKinect=botRPH.addList();
                cooKinect.addDouble(clickedPoint[0]);
                cooKinect.addDouble(clickedPoint[1]);
                cooKinect.addDouble(clickedPoint[2]);
                Bottle &cooiCub=botRPH.addList();
                cooiCub.addDouble(o->m_ego_position[0]);
                cooiCub.addDouble(o->m_ego_position[1]);
                cooiCub.addDouble(o->m_ego_position[2]);
                rfh.write(botRPH,botRPHRep);
                cout<<"Sent to RFH: "<<botRPH.toString().c_str()<<endl;
                cout<<"Got from RFH: "<<botRPHRep.toString().c_str()<<endl;
                pointsCnt++;
            }
            else
            {
                yWarning("There should be 1 and only 1 object on the table");
                yWarning("If there is more than one object, the object you want");
                yWarning("to calibrate must be called \"target\"");
            }
        }
        // Right click: once at least 3 point pairs were collected, ask RFH to
        // solve the kinect calibration and save it to file.
        else if (AgentDetector::clicked==clicked_right)
        {
            AgentDetector::clicked=idle;
            if (pointsCnt>=3)
            {
                Bottle calibBottle,calibReply;
                calibBottle.addString("cal");
                calibBottle.addString("kinect");
                rfh.write(calibBottle,calibReply);
                cout<<"Calibrated ! "<<calibReply.toString().c_str()<<endl;
                calibBottle.clear();
                calibBottle.addString("save");
                rfh.write(calibBottle,calibReply);
                cout<<"Saved to file ! "<<calibReply.toString().c_str()<<endl;
                checkCalibration();
            }
            else
                yWarning("Unable to calibrate with less than 3 points pairs collected");
        }
    }

    if (isRefreshed)
    {
        // yInfo() << " refreshed";
        //////////////////////////////////////////////////////////////////
        //Clear the previous agents
        //for(map<int, Agent*>::iterator pA=identities.begin(); pA!=identities.end() ; pA++)
        //{
        //    pA->second->m_present = 0.0;
        //}
        //partner->m_present = 0.0;

        // check if last apparition was more than dThreshlodDisaparition ago
        if (tracked)
        {
            //Go through all skeletons
            for(deque<Player>::iterator p=joints.begin(); p!=joints.end(); p++)
            {
                //check if this skeletton is really tracked: at least one joint
                //must have a non-zero position.
                bool reallyTracked = false;
                for(map<string,Joint>::iterator jnt = p->skeleton.begin() ; jnt != p->skeleton.end() ; jnt++)
                {
                    if (jnt->second.x != 0 && jnt->second.y != 0 && jnt->second.z != 0)
                    {
                        reallyTracked = true;
                        break;
                    }
                }

                if (reallyTracked)
                {
                    dSince = (clock() - dTimingLastApparition) / (double) CLOCKS_PER_SEC;
                    //yInfo() << " is REALLY tracked";

                    string playerName = partner_default_name;

                    //If the skeleton is tracked we dont identify
                    if (identities.find(p->ID) != identities.end())
                    {
                        playerName = identities[p->ID];
                    }
                    else
                    {
                        //Check if we should learn this face
                        if (currentTrainingFace != "")
                        {
                            setIdentity(*p,currentTrainingFace);
                            currentTrainingFace = "";
                        }

                        //if (useFaceRecognition)
                        playerName = getIdentity(*p);
                    }

                    //We interact with OPC only if the calibration is done
                    if (isCalibrated)
                    {
                        //main bottle to be streamed with loc of all agent body part
                        Bottle& bAgentLoc = agentLocOutPort.prepare();
                        bAgentLoc.clear();

                        //Retrieve this player in OPC or create if does not exist
                        opc->checkout();
                        partner = opc->addOrRetrieveEntity<Agent>(partner_default_name);
                        partner->m_present = 1.0;

                        // reset the timing.
                        dTimingLastApparition = clock();

                        // First time we see this skeleton ID: create/retrieve a
                        // dedicated named agent in the OPC and remember the mapping.
                        if (identities.find(p->ID) == identities.end())
                        {
                            cout<<"Assigning name "<<playerName<<" to skeleton "<<p->ID<<endl;

                            //Agent* specificAgent = opc->addEntity<Agent>(playerName);
                            Agent* specificAgent = opc->addOrRetrieveEntity<Agent>(playerName);
                            if(specificAgent == nullptr) {
                                yError() << "SHIT specificAgent";
                            } else {
                                identities[p->ID] = specificAgent->name();
                                specificAgent->m_present = 1.0;
                                yInfo() << " specific agent is commited";
                                opc->commit(specificAgent);
                                yInfo() << " specific agent is commited done";
                            }
                        }

                        // Relation r(partner->name(),"named",playerName);
                        // opc->addRelation(r,1.0);
                        // cout<<"Commiting : "<<r.toString()<<endl;

                        yarp::os::Bottle &skeleton = outputSkeletonPort.prepare();
                        skeleton.clear();

                        //Convert the skeleton into efaaHelpers body. We loose orientation in the process...
                        for(map<string,Joint>::iterator jnt = p->skeleton.begin() ; jnt != p->skeleton.end() ; jnt++)
                        {
                            Bottle bBodyPartLoc;

                            // Kinect joint -> homogeneous point -> iCub frame.
                            Vector kPosition(4);
                            kPosition[0] = jnt->second.x;
                            kPosition[1] = jnt->second.y;
                            kPosition[2] = jnt->second.z;
                            kPosition[3] = 1;
                            Vector icubPos = kinect2icub * kPosition;
                            Vector irPos = icubPos.subVector(0,2);

                            if (isMounted)
                            {
                                // Extra transform when the sensor is mounted on
                                // the robot; raw joint also streamed out.
                                irPos = transform2IR(irPos);
                                Bottle jntBtl;
                                jntBtl.clear();
                                jntBtl.addString(jnt->first);
                                jntBtl.addDouble(jnt->second.x);
                                jntBtl.addDouble(jnt->second.y);
                                jntBtl.addDouble(jnt->second.z);
                                skeleton.addList() = jntBtl;
                            }

                            // The head position doubles as the agent's ego position.
                            if (jnt->first == EFAA_OPC_BODY_PART_TYPE_HEAD)
                            {
                                partner->m_ego_position = irPos;
                            }
                            partner->m_body.m_parts[jnt->first] = irPos;

                            bBodyPartLoc.addString(jnt->first);
                            bBodyPartLoc.addString(irPos.toString());
                            bAgentLoc.addList() = bBodyPartLoc;
                        }
                        agentLocOutPort.write();

                        opc->commit(partner);
                        // cout << skeleton.toString()<< endl;
                        outputSkeletonPort.write();
                        //opc->commit(agent);
                    }
                    // cout<<'1'<<endl;
                }
            }
        }
        else
        {
            // Nobody tracked: mark the partner absent once it has been gone
            // longer than the disparition threshold.
            if (dSince > dThresholdDisparition)
            {
                opc->checkout();
                partner = opc->addOrRetrieveEntity<Agent>(partner_default_name);
                partner->m_present = 0.0;
                opc->commit(partner);
            }
            else
            {
                //yInfo() << " clock is: " << clock() << "\t last apparition: " << dTimingLastApparition << "\t dSince: " << dSince;
                //yInfo() << " agent dissapeared but not for too long.";
            }
        }
    }
    return true;
}
/* log_weight_div_det[k] = -2*log(weights_k) + log(det(Sigma_k))) covs[k] = cov_rotate_mats[k] * cov_eigen_values[k] * (cov_rotate_mats[k])' cov_rotate_mats[k] are orthogonal matrices of eigenvectors and cov_eigen_values[k] are diagonal matrices (represented by 1D vectors) of eigen values. The <alpha_ik> is the probability of the vector x_i to belong to the k-th cluster: <alpha_ik> ~ weights_k * exp{ -0.5[ln(det(Sigma_k)) + (x_i - mu_k)' Sigma_k^(-1) (x_i - mu_k)] } We calculate these probabilities here by the equivalent formulae: Denote S_ik = -0.5(log(det(Sigma_k)) + (x_i - mu_k)' Sigma_k^(-1) (x_i - mu_k)) + log(weights_k), M_i = max_k S_ik = S_qi, so that the q-th class is the one where maximum reaches. Then alpha_ik = exp{ S_ik - M_i } / ( 1 + sum_j!=q exp{ S_ji - M_i }) */ double CvEM::run_em( const CvVectors& train_data ) { CvMat* centered_sample = 0; CvMat* covs_item = 0; CvMat* log_det = 0; CvMat* log_weights = 0; CvMat* cov_eigen_values = 0; CvMat* samples = 0; CvMat* sum_probs = 0; log_likelihood = -DBL_MAX; CV_FUNCNAME( "CvEM::run_em" ); __BEGIN__; int nsamples = train_data.count, dims = train_data.dims, nclusters = params.nclusters; double min_variation = FLT_EPSILON; double min_det_value = MAX( DBL_MIN, pow( min_variation, dims )); double likelihood_bias = -CV_LOG2PI * (double)nsamples * (double)dims / 2., _log_likelihood = -DBL_MAX; int start_step = params.start_step; int i, j, k, n; int is_general = 0, is_diagonal = 0, is_spherical = 0; double prev_log_likelihood = -DBL_MAX / 1000., det, d; CvMat whdr, iwhdr, diag, *w, *iw; double* w_data; double* sp_data; if( nclusters == 1 ) { double log_weight; CV_CALL( cvSet( probs, cvScalar(1.)) ); if( params.cov_mat_type == COV_MAT_SPHERICAL ) { d = cvTrace(*covs).val[0]/dims; d = MAX( d, FLT_EPSILON ); inv_eigen_values->data.db[0] = 1./d; log_weight = pow( d, dims*0.5 ); } else { w_data = inv_eigen_values->data.db; if( params.cov_mat_type == COV_MAT_GENERIC ) cvSVD( *covs, inv_eigen_values, *cov_rotate_mats, 
0, CV_SVD_U_T ); else cvTranspose( cvGetDiag(*covs, &diag), inv_eigen_values ); cvMaxS( inv_eigen_values, FLT_EPSILON, inv_eigen_values ); for( j = 0, det = 1.; j < dims; j++ ) det *= w_data[j]; log_weight = sqrt(det); cvDiv( 0, inv_eigen_values, inv_eigen_values ); } log_weight_div_det->data.db[0] = -2*log(weights->data.db[0]/log_weight); log_likelihood = DBL_MAX/1000.; EXIT; } if( params.cov_mat_type == COV_MAT_GENERIC ) is_general = 1; else if( params.cov_mat_type == COV_MAT_DIAGONAL ) is_diagonal = 1; else if( params.cov_mat_type == COV_MAT_SPHERICAL ) is_spherical = 1; /* In the case of <cov_mat_type> == COV_MAT_DIAGONAL, the k-th row of cov_eigen_values contains the diagonal elements (variations). In the case of <cov_mat_type> == COV_MAT_SPHERICAL - the 0-ths elements of the vectors cov_eigen_values[k] are to be equal to the mean of the variations over all the dimensions. */ CV_CALL( log_det = cvCreateMat( 1, nclusters, CV_64FC1 )); CV_CALL( log_weights = cvCreateMat( 1, nclusters, CV_64FC1 )); CV_CALL( covs_item = cvCreateMat( dims, dims, CV_64FC1 )); CV_CALL( centered_sample = cvCreateMat( 1, dims, CV_64FC1 )); CV_CALL( cov_eigen_values = cvCreateMat( inv_eigen_values->rows, inv_eigen_values->cols, CV_64FC1 )); CV_CALL( samples = cvCreateMat( nsamples, dims, CV_64FC1 )); CV_CALL( sum_probs = cvCreateMat( 1, nclusters, CV_64FC1 )); sp_data = sum_probs->data.db; // copy the training data into double-precision matrix for( i = 0; i < nsamples; i++ ) { const float* src = train_data.data.fl[i]; double* dst = (double*)(samples->data.ptr + samples->step*i); for( j = 0; j < dims; j++ ) dst[j] = src[j]; } if( start_step != START_M_STEP ) { for( k = 0; k < nclusters; k++ ) { if( is_general || is_diagonal ) { w = cvGetRow( cov_eigen_values, &whdr, k ); if( is_general ) cvSVD( covs[k], w, cov_rotate_mats[k], 0, CV_SVD_U_T ); else cvTranspose( cvGetDiag( covs[k], &diag ), w ); w_data = w->data.db; for( j = 0, det = 1.; j < dims; j++ ) det *= w_data[j]; if( det < 
min_det_value ) { if( start_step == START_AUTO_STEP ) det = min_det_value; else EXIT; } log_det->data.db[k] = det; } else { d = cvTrace(covs[k]).val[0]/(double)dims; if( d < min_variation ) { if( start_step == START_AUTO_STEP ) d = min_variation; else EXIT; } cov_eigen_values->data.db[k] = d; log_det->data.db[k] = d; } } cvLog( log_det, log_det ); if( is_spherical ) cvScale( log_det, log_det, dims ); } for( n = 0; n < params.term_crit.max_iter; n++ ) { if( n > 0 || start_step != START_M_STEP ) { // e-step: compute probs_ik from means_k, covs_k and weights_k. CV_CALL(cvLog( weights, log_weights )); // S_ik = -0.5[log(det(Sigma_k)) + (x_i - mu_k)' Sigma_k^(-1) (x_i - mu_k)] + log(weights_k) for( k = 0; k < nclusters; k++ ) { CvMat* u = cov_rotate_mats[k]; const double* mean = (double*)(means->data.ptr + means->step*k); w = cvGetRow( cov_eigen_values, &whdr, k ); iw = cvGetRow( inv_eigen_values, &iwhdr, k ); cvDiv( 0, w, iw ); w_data = (double*)(inv_eigen_values->data.ptr + inv_eigen_values->step*k); for( i = 0; i < nsamples; i++ ) { double *csample = centered_sample->data.db, p = log_det->data.db[k]; const double* sample = (double*)(samples->data.ptr + samples->step*i); double* pp = (double*)(probs->data.ptr + probs->step*i); for( j = 0; j < dims; j++ ) csample[j] = sample[j] - mean[j]; if( is_general ) cvGEMM( centered_sample, u, 1, 0, 0, centered_sample, CV_GEMM_B_T ); for( j = 0; j < dims; j++ ) p += csample[j]*csample[j]*w_data[is_spherical ? 
0 : j]; pp[k] = -0.5*p + log_weights->data.db[k]; // S_ik <- S_ik - max_j S_ij if( k == nclusters - 1 ) { double max_val = 0; for( j = 0; j < nclusters; j++ ) max_val = MAX( max_val, pp[j] ); for( j = 0; j < nclusters; j++ ) pp[j] -= max_val; } } } CV_CALL(cvExp( probs, probs )); // exp( S_ik ) cvZero( sum_probs ); // alpha_ik = exp( S_ik ) / sum_j exp( S_ij ), // log_likelihood = sum_i log (sum_j exp(S_ij)) for( i = 0, _log_likelihood = likelihood_bias; i < nsamples; i++ ) { double* pp = (double*)(probs->data.ptr + probs->step*i), sum = 0; for( j = 0; j < nclusters; j++ ) sum += pp[j]; sum = 1./MAX( sum, DBL_EPSILON ); for( j = 0; j < nclusters; j++ ) { double p = pp[j] *= sum; sp_data[j] += p; } _log_likelihood -= log( sum ); } // check termination criteria if( fabs( (_log_likelihood - prev_log_likelihood) / prev_log_likelihood ) < params.term_crit.epsilon ) break; prev_log_likelihood = _log_likelihood; } // m-step: update means_k, covs_k and weights_k from probs_ik cvGEMM( probs, samples, 1, 0, 0, means, CV_GEMM_A_T ); for( k = 0; k < nclusters; k++ ) { double sum = sp_data[k], inv_sum = 1./sum; CvMat* cov = covs[k], _mean, _sample; w = cvGetRow( cov_eigen_values, &whdr, k ); w_data = w->data.db; cvGetRow( means, &_mean, k ); cvGetRow( samples, &_sample, k ); // update weights_k weights->data.db[k] = sum; // update means_k cvScale( &_mean, &_mean, inv_sum ); // compute covs_k cvZero( cov ); cvZero( w ); for( i = 0; i < nsamples; i++ ) { double p = probs->data.db[i*nclusters + k]*inv_sum; _sample.data.db = (double*)(samples->data.ptr + samples->step*i); if( is_general ) { cvMulTransposed( &_sample, covs_item, 1, &_mean ); cvScaleAdd( covs_item, cvRealScalar(p), cov, cov ); } else for( j = 0; j < dims; j++ ) { double val = _sample.data.db[j] - _mean.data.db[j]; w_data[is_spherical ? 
0 : j] += p*val*val; } } if( is_spherical ) { d = w_data[0]/(double)dims; d = MAX( d, min_variation ); w->data.db[0] = d; log_det->data.db[k] = d; } else { if( is_general ) cvSVD( cov, w, cov_rotate_mats[k], 0, CV_SVD_U_T ); cvMaxS( w, min_variation, w ); for( j = 0, det = 1.; j < dims; j++ ) det *= w_data[j]; log_det->data.db[k] = det; } } cvConvertScale( weights, weights, 1./(double)nsamples, 0 ); cvMaxS( weights, DBL_MIN, weights ); cvLog( log_det, log_det ); if( is_spherical ) cvScale( log_det, log_det, dims ); } // end of iteration process //log_weight_div_det[k] = -2*log(weights_k/det(Sigma_k))^0.5) = -2*log(weights_k) + log(det(Sigma_k))) if( log_weight_div_det ) { cvScale( log_weights, log_weight_div_det, -2 ); cvAdd( log_weight_div_det, log_det, log_weight_div_det ); } /* Now finalize all the covariation matrices: 1) if <cov_mat_type> == COV_MAT_DIAGONAL we used array of <w> as diagonals. Now w[k] should be copied back to the diagonals of covs[k]; 2) if <cov_mat_type> == COV_MAT_SPHERICAL we used the 0-th element of w[k] as an average variation in each cluster. The value of the 0-th element of w[k] should be copied to the all of the diagonal elements of covs[k]. */ if( is_spherical ) { for( k = 0; k < nclusters; k++ ) cvSetIdentity( covs[k], cvScalar(cov_eigen_values->data.db[k])); } else if( is_diagonal ) { for( k = 0; k < nclusters; k++ ) cvTranspose( cvGetRow( cov_eigen_values, &whdr, k ), cvGetDiag( covs[k], &diag )); } cvDiv( 0, cov_eigen_values, inv_eigen_values ); log_likelihood = _log_likelihood; __END__; cvReleaseMat( &log_det ); cvReleaseMat( &log_weights ); cvReleaseMat( &covs_item ); cvReleaseMat( ¢ered_sample ); cvReleaseMat( &cov_eigen_values ); cvReleaseMat( &samples ); cvReleaseMat( &sum_probs ); return log_likelihood; }
// Classify `_sample` against the trained mixture: returns the index of the
// cluster whose Mahalanobis-style score is smallest, and (optionally) fills
// `_probs` with the normalized per-cluster probabilities.
float CvEM::predict( const CvMat* _sample, CvMat* _probs ) const
{
    float* sample_data = 0;
    void* buffer = 0;
    int allocated_buffer = 0; // set when `buffer` came from cvAlloc (vs stack)
    int cls = 0;

    CV_FUNCNAME( "CvEM::predict" );
    __BEGIN__;

    int i, k, dims;
    int nclusters;
    int cov_mat_type = params.cov_mat_type;
    double opt = FLT_MAX; // best (smallest) score so far
    size_t size;
    CvMat diff, expo;

    dims = means->cols;
    nclusters = params.nclusters;

    CV_CALL( cvPreparePredictData( _sample, dims, 0, params.nclusters, _probs, &sample_data ));

    // allocate memory and initializing headers for calculating:
    // one scratch row for the per-cluster scores (expo) and one for the
    // centered sample (diff); stack-allocated when small enough.
    size = sizeof(double) * (nclusters + dims);
    if( size <= CV_MAX_LOCAL_SIZE )
        buffer = cvStackAlloc( size );
    else
    {
        CV_CALL( buffer = cvAlloc( size ));
        allocated_buffer = 1;
    }
    expo = cvMat( 1, nclusters, CV_64FC1, buffer );
    diff = cvMat( 1, dims, CV_64FC1, (double*)buffer + nclusters );

    // calculate the probabilities: score_k = log_weight_div_det[k]
    //   + (x - mu_k)' Sigma_k^(-1) (x - mu_k)
    for( k = 0; k < nclusters; k++ )
    {
        const double* mean_k = (const double*)(means->data.ptr + means->step*k);
        const double* w = (const double*)(inv_eigen_values->data.ptr + inv_eigen_values->step*k);
        double cur = log_weight_div_det->data.db[k];
        CvMat* u = cov_rotate_mats[k];
        // cov = u w u'  -->  cov^(-1) = u w^(-1) u'
        if( cov_mat_type == COV_MAT_SPHERICAL )
        {
            // Single shared inverse eigenvalue: plain squared distance * w0.
            double w0 = w[0];
            for( i = 0; i < dims; i++ )
            {
                double val = sample_data[i] - mean_k[i];
                cur += val*val*w0;
            }
        }
        else
        {
            for( i = 0; i < dims; i++ )
                diff.data.db[i] = sample_data[i] - mean_k[i];
            // General covariance: rotate into the eigenbasis first so the
            // quadratic form becomes diagonal.
            if( cov_mat_type == COV_MAT_GENERIC )
                cvGEMM( &diff, u, 1, 0, 0, &diff, CV_GEMM_B_T );
            for( i = 0; i < dims; i++ )
            {
                double val = diff.data.db[i];
                cur += val*val*w[i];
            }
        }

        expo.data.db[k] = cur;
        // Track the best class: smaller score == larger probability.
        if( cur < opt )
        {
            cls = k;
            opt = cur;
        }
        /* probability = (2*pi)^(-dims/2)*exp( -0.5 * cur ) */
    }

    if( _probs )
    {
        // Convert scores to normalized probabilities: exp(-0.5*score) / sum.
        CV_CALL( cvConvertScale( &expo, &expo, -0.5 ));
        CV_CALL( cvExp( &expo, &expo ));
        if( _probs->cols == 1 )
            CV_CALL( cvReshape( &expo, &expo, 0, nclusters ));
        CV_CALL( cvConvertScale( &expo, _probs, 1./cvSum( &expo ).val[0] ));
    }

    __END__;

    // cvPreparePredictData may have returned the caller's buffer directly;
    // free only what it allocated for us.
    if( sample_data != _sample->data.fl )
        cvFree( &sample_data );
    if( allocated_buffer )
        cvFree( &buffer );

    return (float)cls;
}
// Build a binary skin mask from an RGB frame using normalized-RGB rules:
// a pixel is skin iff  R > 60  AND  0.4 < R' < 0.6  AND  0.28 < G' < 0.4,
// where R' = R/(R+G+B) and G' = G/(R+G+B). The result is written into
// gray_out. Always returns 0.
gint compose_skin_matrix(IplImage* rgbin, IplImage* gray_out)
{
/*
  int skin_under_seed = 0;

  static IplImage* imageHSV = cvCreateImage( cvSize(rgbin->width, rgbin->height), IPL_DEPTH_8U, 3);
  cvCvtColor(rgbin, imageHSV, CV_RGB2HSV);

  static IplImage* planeH = cvCreateImage( cvGetSize(imageHSV), 8, 1); // Hue component.
  ///IplImage* planeH2= cvCreateImage( cvGetSize(imageHSV), 8, 1); // Hue component, 2nd threshold
  static IplImage* planeS = cvCreateImage( cvGetSize(imageHSV), 8, 1); // Saturation component.
  static IplImage* planeV = cvCreateImage( cvGetSize(imageHSV), 8, 1); // Brightness component.
  cvCvtPixToPlane(imageHSV, planeH, planeS, planeV, 0); // Extract the 3 color components.

  // Detect which pixels in each of the H, S and V channels are probably skin pixels.
  // Assume that skin has a Hue between 0 to 18 (out of 180), and Saturation above 50, and Brightness above 80.
  ///cvThreshold(planeH , planeH2, 10, UCHAR_MAX, CV_THRESH_BINARY); //(hue > 10)
  cvThreshold(planeH , planeH , 20, UCHAR_MAX, CV_THRESH_BINARY_INV); //(hue < 20)
  cvThreshold(planeS , planeS , 48, UCHAR_MAX, CV_THRESH_BINARY); //(sat > 48)
  cvThreshold(planeV , planeV , 80, UCHAR_MAX, CV_THRESH_BINARY); //(val > 80)

  // erode the HUE to get rid of noise.
  cvErode(planeH, planeH, NULL, 1);

  // Combine all 3 thresholded color components, so that an output pixel will only
  // be white (255) if the H, S and V pixels were also white.
  // gray_out = (hue > 10) ^ (hue < 20) ^ (sat > 48) ^ (val > 80), where ^ mean pixels-wise AND
  cvAnd(planeH , planeS , gray_out);
  //cvAnd(gray_out, planeH2, gray_out);
  cvAnd(gray_out, planeV , gray_out);

  return(skin_under_seed);
*/

    // NOTE(review): these planes are allocated once (function-local statics,
    // initialized on first call) and never released; they keep the size of the
    // FIRST frame — if rgbin's size ever changes, the calls below operate on
    // mismatched buffers. Confirm the frame size is constant in this pipeline.
    static IplImage* planeR = cvCreateImage( cvGetSize(rgbin), 8, 1); // R component.
    static IplImage* planeG = cvCreateImage( cvGetSize(rgbin), 8, 1); // G component.
    static IplImage* planeB = cvCreateImage( cvGetSize(rgbin), 8, 1); // B component.
    static IplImage* planeAll = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1); // (R+G+B) component.
    static IplImage* planeR2 = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1); // R component, 32bits
    static IplImage* planeRp = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1); // R' and >0.4
    static IplImage* planeGp = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1); // G' and > 0.28
    static IplImage* planeRp2 = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1); // R' <0.6
    static IplImage* planeGp2 = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1); // G' <0.4

    cvCvtPixToPlane(rgbin, planeR, planeG, planeB, 0); // Extract the 3 color components.

    // NOTE(review): cvAdd/cvDiv here mix 8U sources with 32F destinations —
    // the C API normally requires matching array types; verify this behaves
    // as intended with the OpenCV version in use.
    cvAdd( planeR, planeG, planeAll, NULL);
    cvAdd( planeB, planeAll, planeAll, NULL); // All = R + G + B
    cvDiv( planeR, planeAll, planeRp, 1.0); // R' = R / ( R + G + B)
    cvDiv( planeG, planeAll, planeGp, 1.0); // G' = G / ( R + G + B)

    cvConvertScale( planeR, planeR2, 1.0, 0.0);
    cvCopy(planeGp, planeGp2, NULL);
    cvCopy(planeRp, planeRp2, NULL);

    // Each threshold produces a 0/255-valued plane for one rule.
    cvThreshold(planeR2 , planeR2, 60, UCHAR_MAX, CV_THRESH_BINARY); //(R > 60)
    cvThreshold(planeRp , planeRp, 0.40, UCHAR_MAX, CV_THRESH_BINARY); //(R'> 0.4)
    cvThreshold(planeRp2, planeRp2, 0.6, UCHAR_MAX, CV_THRESH_BINARY_INV); //(R'< 0.6)
    cvThreshold(planeGp , planeGp, 0.28, UCHAR_MAX, CV_THRESH_BINARY); //(G'> 0.28)
    cvThreshold(planeGp2, planeGp2, 0.4, UCHAR_MAX, CV_THRESH_BINARY_INV); //(G'< 0.4)

    // Skin pixel if:
    // R > 60 AND R' > 0.4 AND R' < 0.6 AND G' > 0.28 AND G' < 0.4
    static IplImage* imageSkinPixels = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1);
    cvAnd( planeR2 , planeRp , imageSkinPixels);
    // FIX: removed the original's extra "cvAnd(planeRp, imageSkinPixels,
    // imageSkinPixels)" — planeRp was already folded in on the line above, and
    // bitwise AND is idempotent, so that pass was a redundant no-op.
    cvAnd( planeRp2, imageSkinPixels , imageSkinPixels);
    cvAnd( planeGp , imageSkinPixels , imageSkinPixels);
    cvAnd( planeGp2, imageSkinPixels , imageSkinPixels);

    cvConvertScale( imageSkinPixels, gray_out, 1.0, 0.0);
    return(0);
}
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------ CvMat *tgso (CvMat &tmap, int ntex, double sigma, double theta, CvMat &tsim, int useChi2) { CvMat *roundTmap=cvCreateMat(tmap.rows,tmap.cols,CV_32FC1); CvMat *comp=cvCreateMat(tmap.rows,tmap.cols,CV_32FC1); for (int i=0;i<tmap.rows;i++) for (int j=0;j<tmap.cols;j++) cvSetReal2D(roundTmap,i,j,cvRound(cvGetReal2D(&tmap,i,j))); cvSub(&tmap,roundTmap,comp); if (cvCountNonZero(comp)) { printf("texton labels not integral"); cvReleaseMat(&roundTmap); cvReleaseMat(&comp); exit(1); } double min,max; cvMinMaxLoc(&tmap,&min,&max); if (min<1 && max>ntex) { char *msg=new char[50]; printf(msg,"texton labels out of range [1,%d]",ntex); cvReleaseMat(&roundTmap); cvReleaseMat(&comp); exit(1); } cvReleaseMat(&roundTmap); cvReleaseMat(&comp); double wr=floor(sigma); //sigma=radius (Leo) CvMat *x=cvCreateMat(1,wr-(-wr)+1, CV_64FC1); CvMat *y=cvCreateMat(wr-(-wr)+1,1, CV_64FC1); CvMat *u=cvCreateMat(wr-(-wr)+1,wr-(-wr)+1, CV_64FC1); CvMat *v=cvCreateMat(wr-(-wr)+1,wr-(-wr)+1, CV_64FC1); CvMat *gamma=cvCreateMat(u->rows,v->rows, CV_64FC1); // Set x,y directions for (int j=-wr;j<=wr;j++) { cvSetReal2D(x,0,(j+wr),j); cvSetReal2D(y,(j+wr),0,j); } // Set u,v, meshgrids for (int i=0;i<u->rows;i++) { cvRepeat(x,u); cvRepeat(y,v); } // Compute the gamma matrix from the grid for (int i=0;i<u->rows;i++) for (int j=0;j<u->cols;j++) cvSetReal2D(gamma,i,j,atan2(cvGetReal2D(v,i,j),cvGetReal2D(u,i,j))); cvReleaseMat(&x); cvReleaseMat(&y); CvMat *sum=cvCreateMat(u->rows,u->cols, CV_64FC1); cvMul(u,u,u); cvMul(v,v,v); cvAdd(u,v,sum); CvMat *mask=cvCreateMat(u->rows,u->cols, CV_8UC1); cvCmpS(sum,sigma*sigma,mask,CV_CMP_LE); cvConvertScale(mask,mask,1.0/255); cvSetReal2D(mask,wr,wr,0); int count=cvCountNonZero(mask); cvReleaseMat(&u); cvReleaseMat(&v); cvReleaseMat(&sum); CvMat *sub=cvCreateMat(mask->rows,mask->cols, CV_64FC1); CvMat 
*side=cvCreateMat(mask->rows,mask->cols, CV_8UC1); cvSubS(gamma,cvScalar(theta),sub); cvReleaseMat(&gamma); for (int i=0;i<mask->rows;i++){ for (int j=0;j<mask->cols;j++) { double n=cvmGet(sub,i,j); double n_mod = n-floor(n/(2*M_PI))*2*M_PI; cvSetReal2D(side,i,j, 1 + int(n_mod < M_PI)); } } cvMul(side,mask,side); cvReleaseMat(&sub); cvReleaseMat(&mask); CvMat *lmask=cvCreateMat(side->rows,side->cols, CV_8UC1); CvMat *rmask=cvCreateMat(side->rows,side->cols, CV_8UC1); cvCmpS(side,1,lmask,CV_CMP_EQ); cvCmpS(side,2,rmask,CV_CMP_EQ); int count1=cvCountNonZero(lmask), count2=cvCountNonZero(rmask); if (count1 != count2) { printf("Bug: imbalance\n"); } CvMat *rlmask=cvCreateMat(side->rows,side->cols, CV_32FC1); CvMat *rrmask=cvCreateMat(side->rows,side->cols, CV_32FC1); cvConvertScale(lmask,rlmask,1.0/(255*count)*2); cvConvertScale(rmask,rrmask,1.0/(255*count)*2); cvReleaseMat(&lmask); cvReleaseMat(&rmask); cvReleaseMat(&side); int h=tmap.rows; int w=tmap.cols; CvMat *d = cvCreateMat(h*w,ntex,CV_32FC1); CvMat *coltemp = cvCreateMat(h*w,1,CV_32FC1); CvMat *tgL = cvCreateMat(h,w, CV_32FC1); CvMat *tgR = cvCreateMat(h,w, CV_32FC1); CvMat *temp = cvCreateMat(h,w,CV_8UC1); CvMat *im = cvCreateMat(h,w, CV_32FC1); CvMat *sub2 = cvCreateMat(h,w,CV_32FC1); CvMat *sub2t = cvCreateMat(w,h,CV_32FC1); CvMat *prod = cvCreateMat(h*w,ntex,CV_32FC1); CvMat reshapehdr,*reshape; CvMat* tgL_pad = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1); CvMat* tgR_pad = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1); CvMat* im_pad = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1); CvMat *tg=cvCreateMat(h,w,CV_32FC1); cvZero(tg); if (useChi2 == 1){ CvMat* temp_add1 = cvCreateMat(h,w,CV_32FC1); for (int i=0;i<ntex;i++) { cvCmpS(&tmap,i+1,temp,CV_CMP_EQ); cvConvertScale(temp,im,1.0/255); cvCopyMakeBorder(tgL,tgL_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT); 
cvCopyMakeBorder(tgR,tgR_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT); cvCopyMakeBorder(im,im_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT); cvFilter2D(im_pad,tgL_pad,rlmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2)); cvFilter2D(im_pad,tgR_pad,rrmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2)); cvGetSubRect(tgL_pad,tgL,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgL->cols,tgL->rows)); cvGetSubRect(tgR_pad,tgR,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgR->cols,tgR->rows)); cvSub(tgL,tgR,sub2); cvPow(sub2,sub2,2.0); cvAdd(tgL,tgR,temp_add1); cvAddS(temp_add1,cvScalar(0.0000000001),temp_add1); cvDiv(sub2,temp_add1,sub2); cvAdd(tg,sub2,tg); } cvScale(tg,tg,0.5); cvReleaseMat(&temp_add1); } else{// if not chi^2 for (int i=0;i<ntex;i++) { cvCmpS(&tmap,i+1,temp,CV_CMP_EQ); cvConvertScale(temp,im,1.0/255); cvCopyMakeBorder(tgL,tgL_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT); cvCopyMakeBorder(tgR,tgR_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT); cvCopyMakeBorder(im,im_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT); cvFilter2D(im_pad,tgL_pad,rlmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2)); cvFilter2D(im_pad,tgR_pad,rrmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2)); cvGetSubRect(tgL_pad,tgL,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgL->cols,tgL->rows)); cvGetSubRect(tgR_pad,tgR,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgR->cols,tgR->rows)); cvSub(tgL,tgR,sub2); cvAbs(sub2,sub2); cvTranspose(sub2,sub2t); reshape=cvReshape(sub2t,&reshapehdr,0,h*w); cvGetCol(d,coltemp,i); cvCopy(reshape,coltemp); } cvMatMul(d,&tsim,prod); cvMul(prod,d,prod); CvMat *sumcols=cvCreateMat(h*w,1,CV_32FC1); cvSetZero(sumcols); for (int i=0;i<prod->cols;i++) { cvGetCol(prod,coltemp,i); cvAdd(sumcols,coltemp,sumcols); } reshape=cvReshape(sumcols,&reshapehdr,0,w); cvTranspose(reshape,tg); cvReleaseMat(&sumcols); } //Smooth the gradient 
now!! tg=fitparab(*tg,sigma,sigma/4,theta); cvMaxS(tg,0,tg); cvReleaseMat(&im_pad); cvReleaseMat(&tgL_pad); cvReleaseMat(&tgR_pad); cvReleaseMat(&rlmask); cvReleaseMat(&rrmask); cvReleaseMat(&im); cvReleaseMat(&tgL); cvReleaseMat(&tgR); cvReleaseMat(&temp); cvReleaseMat(&coltemp); cvReleaseMat(&sub2); cvReleaseMat(&sub2t); cvReleaseMat(&d); cvReleaseMat(&prod); return tg; }