int main()
{
    int blocksize = 20;
    //float threshold = 0.001;
    float threshold = -1;
    int xlimit = 20;
    int ylimit = 20;

    IplImage *xdst;
    IplImage *ydst;
    IplImage *aft  = cvLoadImage("scaled-2/aft-scaled-4.jpg", CV_LOAD_IMAGE_COLOR);
    IplImage *fore = cvLoadImage("scaled-2/fore-4.jpg", CV_LOAD_IMAGE_COLOR);
    //IplImage *aft  = cvLoadImage("scene_l.pgm", CV_LOAD_IMAGE_COLOR);
    //IplImage *fore = cvLoadImage("scene_r.pgm", CV_LOAD_IMAGE_COLOR);
    //IplImage *aft  = cvLoadImage("Corners_aft.png", CV_LOAD_IMAGE_COLOR);
    //IplImage *fore = cvLoadImage("Corners_fore.png", CV_LOAD_IMAGE_COLOR);

    IplImage *dst;
    dst  = createImage(aft->width, aft->height, 3);
    xdst = createImage(aft->width, aft->height, 1);
    ydst = createImage(aft->width, aft->height, 1);

    cvNormalize(aft, aft, 0, 255, CV_MINMAX);
    cvNormalize(fore, fore, 0, 255, CV_MINMAX);
    //cvSmooth(aft, aft, CV_GAUSSIAN, 3, 0, 0, 0);
    //cvSmooth(fore, fore, CV_GAUSSIAN, 3, 0, 0, 0);

    StereoExhaustiveBM(aft, fore, dst, xdst, ydst, blocksize, threshold, xlimit, ylimit);

    cvSaveImage("XDisparity.jpg", xdst);
    cvSaveImage("YDisparity.jpg", ydst);
    printf("\nImages Are Saved");

    //cvNamedWindow("Disparity", CV_WINDOW_AUTOSIZE);
    //cvShowImage("Disparity", dst);
    //cvWaitKey(0);
    //cvDestroyWindow("Disparity");
    return 0;
}
void Grab_Camera_Frames()
{
    // russ
    unsigned ii, jj;
    char indat[256*1024];   // huge because I am a lazy man
    char *indatloc;
    unsigned readcnt;
    unsigned totallen;
    uchar *eye_image_loc, *scene_image_loc;

    // Russ: Glasses support added here.
    readuntilchar(stdin, SYMBOL_SOF);
    indat[0] = readchar(stdin);
    assert( (OPCODE_FRAME == (unsigned char)indat[0]) ||
            (SYMBOL_EXIT  == (unsigned char)indat[0]) );
    if(SYMBOL_EXIT == (unsigned char)indat[0])
    {
        Close_Logfile();
        Close_GUI();
        exit(0);
    }

    totallen = 0;
    indatloc = indat;
    while((RESOLUTION_WIDTH*RESOLUTION_HEIGHT)*2 > totallen)
    {
        readcnt = fread(indatloc, 1, ((RESOLUTION_WIDTH*RESOLUTION_HEIGHT)*2) - totallen, stdin);
        totallen += readcnt;
        indatloc += readcnt;
    }
    *indatloc = '\0';

    for(ii = 0; ii < RESOLUTION_WIDTH; ++ii)
    {
        eye_image_loc   = (uchar*)(eye_image->imageData + (ii*eye_image->widthStep));
        scene_image_loc = (uchar*)(scene_image_grayscale->imageData + (ii*scene_image_grayscale->widthStep));
        for(jj = 0; jj < RESOLUTION_HEIGHT; ++jj)
        {
            eye_image_loc[jj]   = (uchar)indat[((2*ii)*RESOLUTION_WIDTH)+jj];
            scene_image_loc[jj] = (uchar)indat[((2*ii+1)*RESOLUTION_WIDTH)+jj];
        }
    }

    cvNormalize(eye_image, eye_image, 0, 255, CV_MINMAX, CV_8UC1);
    cvNormalize(scene_image_grayscale, scene_image_grayscale, 0, 255, CV_MINMAX, CV_8UC1);
    cvCvtColor(scene_image_grayscale, scene_image, CV_GRAY2RGB);

    original_eye_image = cvCloneImage(eye_image);

    if (frame_number == 0)
    {
        Calculate_Avg_Intensity_Hori(eye_image);
        memcpy(intensity_factor_hori, avg_intensity_hori, eye_image->height*sizeof(double));
    }

    frame_number++;
}
//////////////////////////////////
// doPCA()
//
void faceRecognition::doPCA()
{
    int i;
    CvTermCriteria calcLimit;
    CvSize faceImgSize;

    // set the number of eigenvalues to use
    nEigens = nTrainFaces - 1;

    // allocate the eigenvector images
    faceImgSize.width  = faceImgArr[0]->width;
    faceImgSize.height = faceImgArr[0]->height;
    eigenVectArr = (IplImage**)cvAlloc(sizeof(IplImage*) * nEigens);
    for(i = 0; i < nEigens; i++)
        eigenVectArr[i] = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);

    // allocate the eigenvalue array
    eigenValMat = cvCreateMat( 1, nEigens, CV_32FC1 );

    // allocate the averaged image
    pAvgTrainImg = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);

    // set the PCA termination criterion
    calcLimit = cvTermCriteria( CV_TERMCRIT_ITER, nEigens, 1);

    // compute average image, eigenvalues, and eigenvectors
    cvCalcEigenObjects(
        nTrainFaces,             // number of source objects
        (void*)faceImgArr,       // input array of training images
        (void*)eigenVectArr,     // output array of eigenvector images
        CV_EIGOBJ_NO_CALLBACK,   // input/output flags
        0,                       // I/O buffer size in bytes; zero if unknown
        0,                       // pointer to the callback data structure
        &calcLimit,              // conditions for the calculation to finish
        pAvgTrainImg,            // averaged object
        eigenValMat->data.fl);   // eigenvalues in descending order; may be NULL

    // normalize the eigenvalues (L1 norm, so they sum to 1)
    cvNormalize(eigenValMat, eigenValMat, 1, 0, CV_L1, 0);
}
void doPCA()
{
    int i;
    CvTermCriteria calcLimit;
    CvSize faceImgSize;

    numberOfEigenVectors = numberOfTrainingFaces - 1;

    faceImgSize.width  = faceImageArray[0]->width;
    faceImgSize.height = faceImageArray[0]->height;
    eigenVectorArray = (IplImage**)cvAlloc(sizeof(IplImage*) * numberOfEigenVectors);
    for (i = 0; i < numberOfEigenVectors; i++)
        eigenVectorArray[i] = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);

    eigenValueMatrix     = cvCreateMat( 1, numberOfEigenVectors, CV_32FC1 );
    averageTrainingImage = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);

    calcLimit = cvTermCriteria( CV_TERMCRIT_ITER, numberOfEigenVectors, 1);

    cvCalcEigenObjects(
        numberOfTrainingFaces,
        (void*)faceImageArray,
        (void*)eigenVectorArray,
        CV_EIGOBJ_NO_CALLBACK,
        0,
        0,
        &calcLimit,
        averageTrainingImage,
        eigenValueMatrix->data.fl);

    cvNormalize(eigenValueMatrix, eigenValueMatrix, 1, 0, CV_L1, 0);
}
bool S_CFeature::GetHOGHistogram_Patch(IplImage* img, double hog_hist[])
{
    if (img == NULL)
    {
        return false;
    }
    else
    {
        //HOGDescriptor *hog = new HOGDescriptor(cvSize(SIZEs,SIZEs), cvSize(16,16), cvSize(8,8), cvSize(8,8), 9);
        HOGDescriptor *hog = new HOGDescriptor(cvSize(SIZEs,SIZEs), cvSize(32,32), cvSize(16,16), cvSize(16,16), 9);   // descriptor dimension 324
        // window: 64x64, block: 8x8, block step: 4x4, cell: 4x4
        cvNormalize(img, img, 255, 0, CV_MINMAX, 0);

        // Added by Hanjie Wang, 2013-03.
        //LBP(img, img);

        Mat handMat(img);
        vector<float> *descriptors = new std::vector<float>();
        hog->compute(handMat, *descriptors, Size(0,0), Size(0,0));   // window stride / padding: 0x0

        // L1-normalize the descriptor into hog_hist
        double total = 0;
        int i;
        for(i = 0; i < descriptors->size(); i++)
            total += abs((*descriptors)[i]);
        // total = sqrt(total);
        for(i = 0; i < descriptors->size(); i++)
        {
            //hog_hist.push_back((*descriptors)[i]/total);
            hog_hist[i] = (*descriptors)[i] / total;
        }

        delete descriptors;   // release the temporaries instead of leaking them
        delete hog;
        return true;
    }
}
IplImage* preProcessImages(const IplImage* input, int minSize, int maxSize)
{
    int width  = input->width;
    int height = input->height;
    int minSide;
    double ratio;

    if(width < height)
        minSide = width;
    else
        minSide = height;

    if(minSide < minSize)
        ratio = (double)minSize / (double)minSide;
    else if(minSide > maxSize)
        ratio = (double)maxSize / (double)minSide;
    else
        ratio = 1.0;

    IplImage* temp   = cvCreateImage(cvSize(width*ratio, height*ratio), input->depth, input->nChannels);
    IplImage* output = cvCreateImage(cvSize(width*ratio, height*ratio), input->depth, input->nChannels);

    // Resize based on the ratio
    cvResize(input, temp, CV_INTER_AREA);

    // Equalize the histograms of the images
    //cvEqualizeHist(temp, output);
    cvNormalize(temp, output, 0, 255, CV_MINMAX);

    cvReleaseImage(&temp);
    return output;
}
void calculateHOG_block(CvRect block, CvMat* hog_block, IplImage** integrals,
                        CvSize cell, int normalization)
{
    int cell_start_x, cell_start_y;
    CvMat vector_cell;
    int startcol = 0;

    for (cell_start_y = block.y; cell_start_y <= block.y + block.height - cell.height;
         cell_start_y += cell.height)
    {
        for (cell_start_x = block.x; cell_start_x <= block.x + block.width - cell.width;
             cell_start_x += cell.width)
        {
            cvGetCols(hog_block, &vector_cell, startcol, startcol + 9);
            calculateHOG_rect(cvRect(cell_start_x, cell_start_y, cell.width, cell.height),
                              &vector_cell, integrals, -1);
            startcol += 9;
        }
    }

    if (normalization != -1)
        cvNormalize(hog_block, hog_block, 1, 0, normalization);
}
IplImage* knt_get_image_IR(kinect_t *k)
{
    XnStatus nRetVal = XN_STATUS_OK;

    // Create an 8-bit, single-channel image
    IplImage* imgk = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
    XnIRPixel* pIRMap;
    int i = 0, j = 0;

    // Wait until new data is available
    nRetVal = xnWaitOneUpdateAll(k->pContext, k->Node_Image);
    if(nRetVal != XN_STATUS_OK){
        printf("Error updating IR data: %s\n", xnGetStatusString(nRetVal));
    }

    // Fetch the IR map
    pIRMap = xnGetIRMap(k->Node_Image);
    for( i = 0; i < 480; i++)
        for( j = 0; j < 640; j++){
            ((uchar *)(imgk->imageData + i*imgk->widthStep))[j] = (*pIRMap) >> 4;
            ++pIRMap;
        }

    cvNormalize(imgk, imgk, 255, 0, CV_MINMAX);
    return imgk;
}
void Kinect::depth8BitImage(IplImage *img)
{
    if(_depthImg->width == img->width && _depthImg->height == img->height && img->depth == 8)
    {
        EnterCriticalSection(&_csDepth);
        cvNormalize(_depthImg, img, 0.0, 255.0, CV_MINMAX, NULL);
        LeaveCriticalSection(&_csDepth);
    }
}
void output_filter_bank(FilterBank *fb)
{
    int i = 0;
    for (int bw = 0; bw < N_BANDWIDTHS; bw++) {
        for (int frq = 0; frq < N_FREQS; frq++) {
            for (int orn = 0; orn < N_ORIENTATIONS; orn++) {
                char out_file_name[256];
                sprintf(out_file_name, "%s/%s_%02d_%02.2f_%02.2f.png",
                        OUTPUT_PATH, "FILTER",
                        bandwidths[bw], spatial_frequencies[frq], orientations[orn]);
                puts(out_file_name);

                CvMat *out = cvClone(fb->filters[i]->real);
                cvNormalize(out, out, 255, 0, CV_MINMAX, NULL);
                cvSaveImage(out_file_name, out, NULL);
                cvReleaseMat(&out);
                i++;
            }
        }
    }
}
void extractHaarFeatures(const IplImage* img, Mat& haar)
{
    CvSize size     = cvSize(IMAGE_RESIZE, IMAGE_RESIZE);
    CvSize size2    = cvSize(INTEGRAL_SIZE, INTEGRAL_SIZE);
    CvSize img_size = cvGetSize(img);

    // single-channel grayscale working copy
    IplImage* ipl = cvCreateImage(img_size, 8, 1);
    if(img->nChannels == 3)
    {
        cvCvtColor(img, ipl, CV_BGR2GRAY);
    }
    else
    {
        cvCopy(img, ipl, 0);
    }

    if((size.width != img_size.width) || (size.height != img_size.height))
    {
        IplImage* tmpsize = cvCreateImage(size, IPL_DEPTH_8U, 1);
        cvResize(ipl, tmpsize, CV_INTER_LINEAR);
        cvReleaseImage(&ipl);
        ipl = cvCreateImage(size, IPL_DEPTH_8U, 1);
        cvCopy(tmpsize, ipl, 0);
        cvReleaseImage(&tmpsize);
    }

    IplImage* temp = cvCreateImage(size, IPL_DEPTH_64F, 1);
    cvCvtScale(ipl, temp);
    cvNormalize(temp, temp, 0, 1, CV_MINMAX);

    haar.release();
    haar = Mat::zeros(1, NUM_HAAR_FEATURES, CV_32FC1);

    IplImage* integral = cvCreateImage(size2, IPL_DEPTH_64F, 1);
    CvMat* sqSum = cvCreateMat(temp->height + 1, temp->width + 1, CV_64FC1);
    cvIntegral(temp, integral, sqSum);
    cvReleaseMat(&sqSum);

    int actualSize = 0;
    // top left
    for(int i = 0; i < 100; i += 10)
    {
        for(int j = 0; j < 100; j += 10)
        {
            // bottom right
            for(int m = i+10; m <= 100; m += 10)
            {
                for(int n = j+10; n <= 100; n += 10)
                {
                    haar.at<float>(0, actualSize++) = getIntegralRectValue(integral, i, j, m, n);
                }
            }
        }
    }

    cvReleaseImage(&ipl);
    cvReleaseImage(&temp);
    cvReleaseImage(&integral);
}
/**
 * @brief CvGabor::conv_img(IplImage *src, IplImage *dst, int Type)
 * @param src
 * @param dst
 * @param Type
 */
void CvGabor::conv_img(IplImage *src, IplImage *dst, int Type)
{
    // printf("CvGabor::conv_img 1\n");
    double ve;   //, re, im;

    CvMat *mat = cvCreateMat(src->width, src->height, CV_32FC1);
    for (int i = 0; i < src->width; i++)
    {
        for (int j = 0; j < src->height; j++)
        {
            ve = CV_IMAGE_ELEM(src, uchar, j, i);         // CV_IMAGE_ELEM reads the pixel at (j, i)
            CV_MAT_ELEM(*mat, float, i, j) = (float)ve;   // convert to float
        }
    }

    // printf("CvGabor::conv_img 2\n");
    CvMat *rmat = cvCreateMat(src->width, src->height, CV_32FC1);
    CvMat *imat = cvCreateMat(src->width, src->height, CV_32FC1);

    switch (Type)
    {
    case CV_GABOR_REAL:
        cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)Real, cvPoint( (Width-1)/2, (Width-1)/2));
        break;
    case CV_GABOR_IMAG:
        cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)Imag, cvPoint( (Width-1)/2, (Width-1)/2));
        break;
    case CV_GABOR_MAG:
        cvFilter2D( (CvMat*)mat, (CvMat*)rmat, (CvMat*)Real, cvPoint( (Width-1)/2, (Width-1)/2));
        cvFilter2D( (CvMat*)mat, (CvMat*)imat, (CvMat*)Imag, cvPoint( (Width-1)/2, (Width-1)/2));
        // magnitude = sqrt(real^2 + imag^2)
        cvPow(rmat, rmat, 2);
        cvPow(imat, imat, 2);
        cvAdd(imat, rmat, mat);
        cvPow(mat, mat, 0.5);
        break;
    case CV_GABOR_PHASE:
        break;
    }

    // printf("CvGabor::conv_img 3\n");
    if (dst->depth == IPL_DEPTH_8U)
    {
        cvNormalize((CvMat*)mat, (CvMat*)mat, 0, 255, CV_MINMAX);
        for (int i = 0; i < mat->rows; i++)
        {
            for (int j = 0; j < mat->cols; j++)
            {
                ve = CV_MAT_ELEM(*mat, float, i, j);
                CV_IMAGE_ELEM(dst, uchar, j, i) = (uchar)cvRound(ve);
            }
        }
    }

    cvReleaseMat(&rmat);
    cvReleaseMat(&imat);
    cvReleaseMat(&mat);
}
int main(int argc, const char * argv[])
{
    IplImage *src, *templ, *ftmp[6];
    int i;

    if (argc == 3 &&
        ((src   = cvLoadImage(argv[1], 1)) != 0) &&
        ((templ = cvLoadImage(argv[2], 1)) != 0))
    {
        // Allocate output images
        int i_width  = src->width  - templ->width  + 1;
        int i_height = src->height - templ->height + 1;
        for ( i = 0; i < 6; i++ )
        {
            ftmp[i] = cvCreateImage( cvSize(i_width, i_height), IPL_DEPTH_32F, 1 );
        }

        // Do template matching for each of the six comparison methods
        for ( i = 0; i < 6; i++)
        {
            cvMatchTemplate( src, templ, ftmp[i], i );
            cvNormalize( ftmp[i], ftmp[i], 1, 0, CV_MINMAX );
        }

        // DISPLAY
        cvNamedWindow( "Template", 0 );
        cvShowImage( "Template", templ );
        cvNamedWindow( "Image", 0 );
        cvShowImage( "Image", src );
        cvNamedWindow( "SQDIFF", 0 );
        cvShowImage( "SQDIFF", ftmp[0] );
        cvNamedWindow( "SQDIFF_NORMED", 0 );
        cvShowImage( "SQDIFF_NORMED", ftmp[1] );
        cvNamedWindow( "CCORR", 0 );
        cvShowImage( "CCORR", ftmp[2] );
        cvNamedWindow( "CCORR_NORMED", 0 );
        cvShowImage( "CCORR_NORMED", ftmp[3] );
        cvNamedWindow( "CCOEFF", 0 );
        cvShowImage( "CCOEFF", ftmp[4] );
        cvNamedWindow( "CCOEFF_NORMED", 0 );
        cvShowImage( "CCOEFF_NORMED", ftmp[5] );
        cvWaitKey();
    }
    return 0;
}
ofVec2f Simple3DTracker::_predictNextPosition(ofVec2f currentPosition, float* minCost)
{
    int bestx = currentPosition.x, besty = currentPosition.y;
    float bestcost = 9999999, cost, distance;
    const float alpha = _weightedMatchingCoefficient;

    if(!_template || !_tmp || !_tmp2)
        return currentPosition;

    // template matching
    IplImage* haystack = _cameraImage();
    cvMatchTemplate(haystack, _template->getCvImage(), _tmp2, CV_TM_CCOEFF);
    cvNormalize(_tmp2, _tmp2, 1.0, 0.0, CV_MINMAX);

    // find the best match
    for(int y = 0; y < _tmp2->height; y++) {
        const float *src = (const float*)(_tmp2->imageData + y * _tmp2->widthStep);
        unsigned char *dst = (unsigned char*)(_tmp->getCvImage()->imageData + y * _tmp->getCvImage()->widthStep);
        for(int x = 0; x < _tmp2->width; x++) {
            dst[x] = (unsigned char)(src[x] * 255.0f);
            distance = currentPosition.distance(ofVec2f(x, y));
            if(distance <= _lookupRadius) {
                cost = (alpha * (1.0f - src[x])) + ((1.0f - alpha) * distance / _lookupRadius);
                if(cost <= bestcost) { // weighted matching
                    bestx = x;
                    besty = y;
                    bestcost = cost;
                }
            }
        }
    }
    _tmp->flagImageChanged();

    // get the resulting position...
    ofVec2f result(bestx + _template->width/2, besty + _template->height/2);

    // return the min cost?
    if(minCost)
        *minCost = bestcost;

    // update the template?
    if(result.distance(currentPosition) >= UPDATETPL_MINDIST)
        _setTemplate(result);

    // done!
    return result;
}
int StereoVision::stereoProcess(CvArr* imageSrcLeft, CvArr* imageSrcRight)
{
    if(!calibrationDone) return RESULT_FAIL;

    if(!imagesRectified[0])   imagesRectified[0]   = cvCreateMat( imageSize.height, imageSize.width, CV_8U );
    if(!imagesRectified[1])   imagesRectified[1]   = cvCreateMat( imageSize.height, imageSize.width, CV_8U );
    if(!imageDepth)           imageDepth           = cvCreateMat( imageSize.height, imageSize.width, CV_16S );
    if(!imageDepthNormalized) imageDepthNormalized = cvCreateMat( imageSize.height, imageSize.width, CV_8U );
    // 2013.4.29
    // CZT
    //
    if(!imageDepthNormalized_c1) imageDepthNormalized_c1 = cvCreateMat( 512, 512, CV_8U );
    /*if(!imagesRectified[0]) imagesRectified[0] = cvCreateMat( 512, 512, CV_8U );
    if(!imagesRectified[1]) imagesRectified[1] = cvCreateMat( 512, 512, CV_8U );
    if(!imageDepth) imageDepth = cvCreateMat( 512, 512, CV_16S );
    if(!imageDepthNormalized) imageDepthNormalized = cvCreateMat( 512, 512, CV_8U );*/

    // rectify images
    cvRemap( imageSrcLeft,  imagesRectified[0], mx1, my1 );
    cvRemap( imageSrcRight, imagesRectified[1], mx2, my2 );

    CvStereoBMState *BMState = cvCreateStereoBMState();
    BMState->preFilterSize       = 41;
    BMState->preFilterCap        = 31;
    BMState->SADWindowSize       = 41;
    BMState->minDisparity        = -64;
    BMState->numberOfDisparities = 128;
    BMState->textureThreshold    = 10;
    BMState->uniquenessRatio     = 15;

    cvFindStereoCorrespondenceBM( imagesRectified[0], imagesRectified[1], imageDepth, BMState);
    cvNormalize( imageDepth, imageDepthNormalized, 0, 255, CV_MINMAX );
    cvReleaseStereoBMState(&BMState);

    // 2013.4.29
    // CZT
    //
    cvResize(imageDepthNormalized, imageDepthNormalized_c1);
    cvFlip(imageDepthNormalized_c1, imageDepthNormalized_c1, 1);   // flip about the y-axis

    return RESULT_OK;
}
/* The following function takes as input the rectangular cell for which the
histogram of oriented gradients has to be calculated, a matrix hog_cell of
dimensions 1x9 to store the bin values for the histogram, the integral
histogram, and the normalization scheme to be used. No normalization is done
if normalization = -1 */
void HoGProcessor::calulateHOG_rect(CvRect cell, CvMat* hog_cell, IplImage** integrals, int normalization)
{
    /* Calculate the value of each bin of the histogram one by one,
       using the four corners of the cell in the integral histogram */
    for(int i = 0; i < 9; i++){
        float a = ((double*)(integrals[i]->imageData + (cell.y)*(integrals[i]->widthStep)))[cell.x];
        float b = ((double*)(integrals[i]->imageData + (cell.y + cell.height)*(integrals[i]->widthStep)))[cell.x + cell.width];
        float c = ((double*)(integrals[i]->imageData + (cell.y)*(integrals[i]->widthStep)))[cell.x + cell.width];
        float d = ((double*)(integrals[i]->imageData + (cell.y + cell.height)*(integrals[i]->widthStep)))[cell.x];
        ((float*)hog_cell->data.fl)[i] = (a + b) - (c + d);
    }

    /* Normalize the matrix */
    if(normalization != -1){
        cvNormalize(hog_cell, hog_cell, 1, 0, normalization);
    }
}
void EigenfaceRecognizer::doPCA()
{
    // set the number of eigenvalues to use
    numEigenvalues = numTrainedImages - 1;   // the number of face images

    CvSize faceImgSize;

    // allocate the eigenvector images
    faceImgSize.width  = images[0]->width;
    faceImgSize.height = images[0]->height;
    eigenVectors = (IplImage**)cvAlloc(sizeof(IplImage*) * numEigenvalues);
    for(int i = 0; i < numEigenvalues; i++)
        eigenVectors[i] = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);

    // allocate the eigenvalue array
    eigenvalues = cvCreateMat( 1, numEigenvalues, CV_32FC1 );

    // allocate the averaged image
    averageImage = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);

    // set the PCA termination criterion
    CvTermCriteria calcLimit = cvTermCriteria( CV_TERMCRIT_ITER, numEigenvalues, 1);

    // compute average image, eigenvalues, and eigenvectors
    // the new C++ api has a cv::PCA object for this, upgrade this in the future!
    cvCalcEigenObjects(
        numTrainedImages,
        (void*)images,
        (void*)eigenVectors,
        CV_EIGOBJ_NO_CALLBACK,
        0,
        0,
        &calcLimit,
        averageImage,
        eigenvalues->data.fl);

    cvNormalize(eigenvalues, eigenvalues, 1, 0, CV_L1, 0);
}
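// A minimal sketch of the cv::PCA upgrade hinted at in the comment above.
// It assumes the training faces are already loaded as equally sized grayscale
// cv::Mat images in a hypothetical vector `trainImages`; the function and
// variable names here are illustrative, not part of the original
// EigenfaceRecognizer class.
#include <opencv2/core/core.hpp>
#include <vector>

cv::PCA buildFacePCA(const std::vector<cv::Mat>& trainImages)
{
    // Flatten every image into one row of a samples-by-pixels matrix.
    cv::Mat data((int)trainImages.size(), (int)trainImages[0].total(), CV_32F);
    for (size_t i = 0; i < trainImages.size(); ++i)
    {
        cv::Mat row = trainImages[i].reshape(1, 1);   // 1 channel, 1 row
        row.convertTo(data.row((int)i), CV_32F);
    }

    // Keep numTrainedImages-1 components, mirroring numEigenvalues above.
    int maxComponents = (int)trainImages.size() - 1;
    cv::PCA pca(data, cv::Mat(), CV_PCA_DATA_AS_ROW, maxComponents);

    // pca.mean corresponds to averageImage, pca.eigenvectors to eigenVectors,
    // and pca.eigenvalues to eigenvalues in the legacy code above.
    return pca;
}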
// Compute the HOG feature for a single cell
void HOGProcessor::calcHOGForCell(CvRect cell, CvMat* hogCell, IplImage** integrals, int normalization)
{
    // Accumulate the value of each bin in turn from the integral histogram
    for(int i = 0; i < NumOfBins; i++){
        float a = ((double*)(integrals[i]->imageData + (cell.y)*(integrals[i]->widthStep)))[cell.x];
        float b = ((double*)(integrals[i]->imageData + (cell.y + cell.height)*(integrals[i]->widthStep)))[cell.x + cell.width];
        float c = ((double*)(integrals[i]->imageData + (cell.y)*(integrals[i]->widthStep)))[cell.x + cell.width];
        float d = ((double*)(integrals[i]->imageData + (cell.y + cell.height)*(integrals[i]->widthStep)))[cell.x];
        ((float*)hogCell->data.fl)[i] = (a + b) - (c + d);
    }

    // Normalize the feature vector
    if(normalization != -1){
        cvNormalize(hogCell, hogCell, 1, 0, normalization);
    }
}
// Compute the HOG feature for a single block
void HOGProcessor::calcHOGForBlock(CvRect block, CvMat* hogBlock, IplImage** integrals,
                                   CvSize cell, int normalization)
{
    int cellStartX, cellStartY;
    CvMat vectorCell;
    int startcol = 0;

    for(cellStartY = block.y; cellStartY <= block.y + block.height - cell.height;
        cellStartY += cell.height)
    {
        for(cellStartX = block.x; cellStartX <= block.x + block.width - cell.width;
            cellStartX += cell.width)
        {
            cvGetCols(hogBlock, &vectorCell, startcol, startcol + NumOfBins);
            calcHOGForCell(cvRect(cellStartX, cellStartY, cell.width, cell.height),
                           &vectorCell, integrals, -1);
            startcol += NumOfBins;
        }
    }

    if(normalization != -1){
        cvNormalize(hogBlock, hogBlock, 1, 0, normalization);
    }
}
bool CFeatureExtraction::GetTextureChannels(CvMat * pChannels, CvMat * pTextureChannelsArr[])
{
    printf("\nCFeatureExtraction::GetTextureChannels in\n");

    int gaborSize  = 24;
    int histSize   = 30;
    int vectorSize = gaborSize + histSize;

    // Calc the full histogram vectors
    CvMat * pHistMat = cvCreateMat( m_nWidth*m_nHeight, histSize, CV_32F );
    CalcHistogram(m_pSrcImg, pHistMat);
    // Do we need to normalize?
    cvNormalize(pHistMat, pHistMat, 0, 1, CV_MINMAX);

    CvMat * pGaborMat = cvCreateMat( m_nWidth*m_nHeight, gaborSize, CV_32F );
    GetGaborResponse(pGaborMat);
    // Do we need to normalize?
    //cvNormalize(pGaborMat, pGaborMat, 0, 1, CV_MINMAX);

    // Our merged matrix
    CvMat * pTextureMat = cvCreateMat( m_nWidth*m_nHeight, vectorSize, CV_32F );
    // Combine into a single matrix
    MergeMatrices(pGaborMat, pHistMat, pTextureMat);

    // This matrix would hold the values represented in the new basis we've found
    //CvMat * pResultMat = cvCreateMat( m_nWidth*m_nHeight, TEXTURE_CHANNEL_NUM, CV_32F );
    CvMat * pResultMat = pChannels;

    // Actual calculation
    DoPCA(pTextureMat, pResultMat, vectorSize, TEXTURE_CHANNEL_NUM);

    // Extracting the 3 primary channels
    //GetChannels(pResultMat, pTextureChannelsArr, TEXTURE_CHANNEL_NUM, TEXTURE_CHANNEL_NUM);

    cvReleaseMat(&pHistMat);
    cvReleaseMat(&pGaborMat);
    cvReleaseMat(&pTextureMat);

    printf("CFeatureExtraction::GetTextureChannels out\n");
    return true;
}
// Principal component analysis
void doPCA()
{
    int i;
    CvTermCriteria calcLimit;
    CvSize faceImgSize;

    // set the number of principal eigenvalues to use
    nEigens = nTrainFaces - 1;

    // allocate storage for the eigenvectors
    faceImgSize.width  = faceImgArr[0]->width;
    faceImgSize.height = faceImgArr[0]->height;
    eigenVectArr = (IplImage**)cvAlloc(sizeof(IplImage*) * nEigens);   // one image per eigenvalue
    for(i = 0; i < nEigens; i++)
        eigenVectArr[i] = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);

    // allocate storage for the principal eigenvalues
    eigenValMat = cvCreateMat( 1, nEigens, CV_32FC1 );

    // allocate storage for the average image
    pAvgTrainImg = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);

    // set the PCA termination criterion
    calcLimit = cvTermCriteria( CV_TERMCRIT_ITER, nEigens, 1);

    // compute the average image, eigenvalues, and eigenvectors
    cvCalcEigenObjects(
        nTrainFaces,
        (void*)faceImgArr,
        (void*)eigenVectArr,
        CV_EIGOBJ_NO_CALLBACK,
        0,
        0,
        &calcLimit,
        pAvgTrainImg,
        eigenValMat->data.fl);

    cvNormalize(eigenValMat, eigenValMat, 1, 0, CV_L1, 0);
}
// Do the Principal Component Analysis, finding the average image
// and the eigenfaces that represent any image in the given dataset.
void doPCA()
{
    int i;
    CvTermCriteria calcLimit;
    CvSize faceImgSize;

    // set the number of eigenvalues to use
    nEigens = nTrainFaces - 1;

    // allocate the eigenvector images
    faceImgSize.width  = faceImgArr[0]->width;
    faceImgSize.height = faceImgArr[0]->height;
    eigenVectArr = (IplImage**)cvAlloc(sizeof(IplImage*) * nEigens);
    for(i = 0; i < nEigens; i++)
        eigenVectArr[i] = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);

    // allocate the eigenvalue array
    eigenValMat = cvCreateMat( 1, nEigens, CV_32FC1 );

    // allocate the averaged image
    pAvgTrainImg = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);

    // set the PCA termination criterion
    calcLimit = cvTermCriteria( CV_TERMCRIT_ITER, nEigens, 1);

    // compute average image, eigenvalues, and eigenvectors
    cvCalcEigenObjects(
        nTrainFaces,
        (void*)faceImgArr,
        (void*)eigenVectArr,
        CV_EIGOBJ_NO_CALLBACK,
        0,
        0,
        &calcLimit,
        pAvgTrainImg,
        eigenValMat->data.fl);

    cvNormalize(eigenValMat, eigenValMat, 1, 0, CV_L1, 0);
}
void CDataOut::Feat()
{
    CvMat *m;
    m = cvCreateMat(3, 1, CV_32FC1);

    for (list<CElempunto*>::iterator It = pEstimator->pMap->bbdd.begin();
         It != pEstimator->pMap->bbdd.end(); It++)
    {
        ///FIXME: move this into a function so it lives in only one place
        cvmSet(m, 0, 0,  cos((*It)->theta) * sin((*It)->phi));
        cvmSet(m, 1, 0, -sin((*It)->theta));
        cvmSet(m, 2, 0,  cos((*It)->theta) * cos((*It)->phi));
        cvNormalize( m, m );

        FeatFile << (*It)->wx + cvmGet(m, 0, 0) / (*It)->rho << " ";
        FeatFile << (*It)->wy + cvmGet(m, 1, 0) / (*It)->rho << " ";
        FeatFile << (*It)->wz + cvmGet(m, 2, 0) / (*It)->rho << " ";
        FeatFile << (*It)->ID << " ";
        FeatFile << (*It)->count;
        FeatFile << endl;
    }
    cvReleaseMat(&m);
}
IplImage* BouyBaseObject::GetMask(const IplImage * imgIn, IplImage * debugOut) const
{
    if(imgIn == NULL) return NULL;

    IplImage* colormask        = NULL;
    IplImage* gvcolormask      = NULL;
    //IplImage* shapemask = ShapeMask(imgIn);
    IplImage* segmentationmask = NULL;
    IplImage* histogrammask    = NULL;
    //IplImage* edgemask = EdgeMask(imgIn);
    IplImage* templatemask     = NULL;

    // if(colormask == NULL || shapemask == NULL ||
    //    segmentationmask == NULL || histogrammask == NULL ||
    //    edgemask == NULL ) return NULL;
    //cvShowImage("colormask", colormask);
    //cvShowImage("channelmask", channelmask);

    IplImage * imgOut    = cvCreateImage(cvGetSize(imgIn), IPL_DEPTH_8U, 1);
    IplImage * threshold = cvCreateImage(cvGetSize(imgIn), IPL_DEPTH_8U, 1);
    cvZero(imgOut);

    if(mEnableHist)
    {
        histogrammask = HistogramMask(imgIn);
    }
    if(mEnableColor)
    {
        colormask = ColorMask(imgIn);
    }
    if(mEnableSegment)
    {
        segmentationmask = SegmentationMask(imgIn);
    }
    if(mEnableGVColor)
    {
        gvcolormask = GVColorMask(imgIn);
    }

    // Blend the enabled masks together with their respective weights
    int count = 1;
    if(VisionUtils::CombineMasks(imgOut, histogrammask, imgOut, count, mHistWeight))
    {
        count++;
    }
    if(VisionUtils::CombineMasks(imgOut, colormask, imgOut, count, mColorWeight))
    {
        count++;
    }
    if(VisionUtils::CombineMasks(imgOut, segmentationmask, imgOut, count, mSegmentWeight))
    {
        count++;
    }
    if(VisionUtils::CombineMasks(imgOut, gvcolormask, imgOut, count, mGVColorWeight))
    {
        count++;
    }
    //VisionUtils::CombineMasks(imgOut, edgemask, imgOut, 2, 1);
    //VisionUtils::CombineMasks(imgOut, histogrammask, imgOut, 2, 1);

    cvNormalize(imgOut, imgOut, 255, 0, CV_MINMAX);
    if(mDebug)
    {
        cvShowImage("combined", imgOut);
    }

    cvThreshold(imgOut, threshold, mMainThreshold, 255, CV_THRESH_BINARY);

    std::list<CvBox2D> blobList;
    blobList = Zebulon::Vision::VisionUtils::GetBlobBoxes(threshold, 0, mMinNoiseSizePercent);
    for(std::list<CvBox2D>::iterator it = blobList.begin(); it != blobList.end(); it++)
    {
        CvPoint2D32f boxCorners32[4];
        CvPoint boxCorners[4];
        cvBoxPoints(*it, boxCorners32);
        for(int i = 0; i < 4; i++)
        {
            boxCorners[i] = cvPointFrom32f(boxCorners32[i]);
        }
        cvFillConvexPoly(threshold, boxCorners, 4, cvScalar(0,0,0), 4);
        //Zebulon::Vision::VisionUtils::DrawSquare(imgOut, *it);
    }

    //shapemask = FindCircles(imgOut);
    IplImage * masked = TemplateMask(imgOut, threshold, mTemplate);
    cvReleaseImage(&imgOut);   // release the pre-template image instead of leaking it
    imgOut = masked;
    //VisionUtils::CombineMasks(imgOut, templatemask, imgOut);

    if(mDebug)
    {
        cvShowImage("clean", threshold);
        cvShowImage("final", imgOut);
        cvShowImage("color", colormask);
        cvShowImage("hist", histogrammask);
        cvShowImage("segment", segmentationmask);
        cvShowImage("template", templatemask);
        cvShowImage("gvcolor", gvcolormask);
    }

    cvReleaseImage(&threshold);
    cvReleaseImage(&colormask);
    //cvReleaseImage(&shapemask);
    cvReleaseImage(&segmentationmask);
    cvReleaseImage(&histogrammask);
    cvReleaseImage(&gvcolormask);
    //cvReleaseImage(&edgemask);
    return imgOut;
}
void CvEM::init_em( const CvVectors& train_data )
{
    CvMat *w = 0, *u = 0, *tcov = 0;

    CV_FUNCNAME( "CvEM::init_em" );

    __BEGIN__;

    double maxval = 0;
    int i, force_symm_plus = 0;
    int nclusters = params.nclusters, nsamples = train_data.count, dims = train_data.dims;

    if( params.start_step == START_AUTO_STEP || nclusters == 1 || nclusters == nsamples )
        init_auto( train_data );
    else if( params.start_step == START_M_STEP )
    {
        for( i = 0; i < nsamples; i++ )
        {
            CvMat prob;
            cvGetRow( params.probs, &prob, i );
            cvMaxS( &prob, 0., &prob );
            cvMinMaxLoc( &prob, 0, &maxval );
            if( maxval < FLT_EPSILON )
                cvSet( &prob, cvScalar(1./nclusters) );
            else
                cvNormalize( &prob, &prob, 1., 0, CV_L1 );
        }
        EXIT; // do not preprocess covariation matrices,
              // as in this case they are initialized at the first iteration of EM
    }
    else
    {
        CV_ASSERT( params.start_step == START_E_STEP && params.means );
        if( params.weights && params.covs )
        {
            cvConvert( params.means, means );
            cvReshape( weights, weights, 1, params.weights->rows );
            cvConvert( params.weights, weights );
            cvReshape( weights, weights, 1, 1 );
            cvMaxS( weights, 0., weights );
            cvMinMaxLoc( weights, 0, &maxval );
            if( maxval < FLT_EPSILON )
                cvSet( weights, cvScalar(1./nclusters) );
            cvNormalize( weights, weights, 1., 0, CV_L1 );

            for( i = 0; i < nclusters; i++ )
                CV_CALL( cvConvert( params.covs[i], covs[i] ));
            force_symm_plus = 1;
        }
        else
            init_auto( train_data );
    }

    CV_CALL( tcov = cvCreateMat( dims, dims, CV_64FC1 ));
    CV_CALL( w = cvCreateMat( dims, dims, CV_64FC1 ));
    if( params.cov_mat_type == COV_MAT_GENERIC )
        CV_CALL( u = cvCreateMat( dims, dims, CV_64FC1 ));

    for( i = 0; i < nclusters; i++ )
    {
        if( force_symm_plus )
        {
            cvTranspose( covs[i], tcov );
            cvAddWeighted( covs[i], 0.5, tcov, 0.5, 0, tcov );
        }
        else
            cvCopy( covs[i], tcov );

        cvSVD( tcov, w, u, 0, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );

        if( params.cov_mat_type == COV_MAT_SPHERICAL )
            cvSetIdentity( covs[i], cvScalar(cvTrace(w).val[0]/dims) );
        else if( params.cov_mat_type == COV_MAT_DIAGONAL )
            cvCopy( w, covs[i] );
        else
        {
            // generic case: covs[i] = (u')'*max(w,0)*u'
            cvGEMM( u, w, 1, 0, 0, tcov, CV_GEMM_A_T );
            cvGEMM( tcov, u, 1, 0, 0, covs[i], 0 );
        }
    }

    __END__;

    cvReleaseMat( &w );
    cvReleaseMat( &u );
    cvReleaseMat( &tcov );
}
void ConvertRealImage(IplImage* im, IplImage* gray8u, IplImage* rgb8u)
{
    cvNormalize(im, im, 1, 0, CV_MINMAX);
    cvScale(im, gray8u, 255, 0);
    cvCvtColor(gray8u, rgb8u, CV_GRAY2BGR);
}
int main()
{
    ImageFrame imageFrame;
    DepthFrame depthFrame;
    KinectComp kinect;

    SetPorperty(kinect, GetProperty());

    if (kinect.initialize() != OPROS_SUCCESS)
    {
        return 0;
    }
    if (kinect.start() != OPROS_SUCCESS)
    {
        return 0;
    }

    KinectServiceProvided& kinectService =
        dynamic_cast<KinectServiceProvided&>(*kinect.getPort("KinectService"));

    kinectService.SetCameraAngle(20);
    kinectService.SetCameraAngle(-20);
    kinectService.SetCameraAngle(10);

    vector<Skeleton> skeletons;

    imageFrame = kinectService.GetImage();
    depthFrame = kinectService.GetDepthImage();
    //return 0;   // stray early return; everything below would be unreachable if kept

    if (imageFrame.data.isNULL() || depthFrame.data.isNULL())
    {
        return 0;
    }

    IplImage* image            = cvCreateImageHeader(cvSize(imageFrame.width, imageFrame.height), 8, 3);
    IplImage* depthImage       = cvCreateImage(cvSize(depthFrame.width, depthFrame.height), 8, 1);
    IplImage* depthImageHeader = cvCreateImageHeader(cvSize(depthFrame.width, depthFrame.height), 16, 1);

    for (;;)
    {
        imageFrame = kinectService.GetImage();
        depthFrame = kinectService.GetDepthImage();
        skeletons  = kinectService.GetSkeletonAll();

        if(skeletons.size() != 0)
        {
            for(size_t i = 0; i < skeletons.size(); i++)
            {
                Skeleton& skeleton = skeletons[i];
                char* resultMessage = "";
                CvScalar color = CV_RGB(255, skeleton.userID * 2, (skeleton.userID >> 8) * 2);

                switch(skeleton.result)
                {
                case Skeleton::NOT_TRACKED:
                    resultMessage = "Not Tracked";
                    break;
                case Skeleton::POSITION_ONLY:
                    cvCircle(image, cvPoint((int)skeleton.position.x, (int)skeleton.position.y), 5, color, 1);
                    resultMessage = "Position Only";
                    break;
                case Skeleton::TRACKED:
                    for(int index = 0; index < skeleton.JOINT_COUNT; index++)
                    {
                        cvCircle(image, cvPoint((int)skeleton.joints[index].x, (int)skeleton.joints[index].y), 5, color);
                    }
                    resultMessage = "Tracked";
                    break;
                default:
                    resultMessage = "Unknown";
                    break;
                }
                printf("%d : %s\r\n", skeleton.userID, resultMessage);
            }
        }

        image->imageData            = (char*)&imageFrame.data->operator[](0);
        depthImageHeader->imageData = (char*)&depthFrame.data->operator[](0);

        cvNormalize(depthImageHeader, depthImage, 0, 0xFF, CV_MINMAX);

        cvShowImage("Kinect Camera", image);
        cvShowImage("Kinect Depth", depthImage);

        if (cvWaitKey(1) == 27)
        {
            break;
        }
    }

    cvReleaseImage(&image);
    cvReleaseImage(&depthImage);
    cvReleaseImageHeader(&depthImageHeader);

    kinect.stop();
    kinect.destroy();
    return 0;
}
DMZ_INTERNAL NHorizontalSegmentation best_n_hseg(IplImage *y_strip, NVerticalSegmentation vseg)
{
    // Gradient
    IplImage *grad = cvCreateImage(cvSize(428, 27), IPL_DEPTH_8U, 1);
    llcv_morph_grad3_2d_cross_u8(y_strip, grad);

    // Reduce (sum), normalize
    IplImage *grad_sum = cvCreateImage(cvSize(428, 1), IPL_DEPTH_32F, 1);
    // could sum to IPL_DEPTH_16U and then convert to 32F for normalization; doing it this way
    // for simplicity, will probably get changed during optimization
    cvReduce(grad, grad_sum, 0 /* reduce to single row */, CV_REDUCE_SUM);
    cvNormalize(grad_sum, grad_sum, 0.0f, 1.0f, CV_MINMAX, NULL);
    cvReleaseImage(&grad);

    NHorizontalSegmentation best;
    best.n_offsets = vseg.number_length;
    best.score = 428.0f;   // lower is better, this is the max possible (i.e. the worst)
    best.number_width = 0.0f;
    memset(&best.offsets, 0, 16 * sizeof(uint16_t));

    float *grad_sum_data = (float *)llcv_get_data_origin(grad_sum);

    SliceF32 width_slice;
    SliceU16 offset_slice;

    width_slice.min = 17.1f;
    width_slice.max = 19.7f;
    width_slice.step = 0.5f;
    offset_slice.min = 0;
    offset_slice.max = SliceU16_MAX;
    offset_slice.step = 10;
    best = best_n_hseg_constrained(grad_sum_data, vseg, best, width_slice, offset_slice);

    // In the following lines, there's some bounds checking on offset_slice.min.
    // It is needed because it prevents underflow due to using uints. (The uint/int issue
    // also explains the ?: method instead of subtracting and taking max vs 0.)
    // There's no bounds checking needed on width_slice.min/max/step, because they can't
    // get outside of a reasonable range in the steps below.
    // The bounds checking on offset_slice.max is done in best_n_hseg_constrained, because
    // it can't overflow, and because we don't know enough here to do it conveniently and DRYly.

    width_slice.min = best.number_width - 0.5f;
    width_slice.max = best.number_width + 0.5f;
    width_slice.step = 0.2f;
    offset_slice.min = best.pattern_offset < 10 ? 0 : best.pattern_offset - 10;
    offset_slice.max = best.pattern_offset + 10;
    offset_slice.step = 1;
    best = best_n_hseg_constrained(grad_sum_data, vseg, best, width_slice, offset_slice);

    width_slice.min = best.number_width - 0.2f;
    width_slice.max = best.number_width + 0.2f;
    width_slice.step = 0.1f;
    offset_slice.min = best.pattern_offset < 3 ? 0 : best.pattern_offset - 3;
    offset_slice.max = best.pattern_offset + 3;
    offset_slice.step = 1;
    best = best_n_hseg_constrained(grad_sum_data, vseg, best, width_slice, offset_slice);

    width_slice.min = best.number_width - 0.1f;
    width_slice.max = best.number_width + 0.1f;
    width_slice.step = 0.05f;
    offset_slice.min = best.pattern_offset < 3 ? 0 : best.pattern_offset - 3;
    offset_slice.max = best.pattern_offset + 3;
    offset_slice.step = 1;
    best = best_n_hseg_constrained(grad_sum_data, vseg, best, width_slice, offset_slice);

    cvReleaseImage(&grad_sum);

    return best;
}
/**
 * @brief Return an Image (gandalf image class) with a specific Type
 * @param Type The Type of gabor kernel, e.g. REAL, IMAG, MAG, PHASE
 * @return Pointer to image structure, or NULL on failure
 */
IplImage* CvGabor::get_image(int Type)
{
    if(IsKernelCreate() == false)
    {
        perror("Error: the Gabor kernel has not been created in get_image()!\n");
        return NULL;
    }
    else
    {
        IplImage* pImage;
        IplImage* newimage;
        newimage = cvCreateImage(cvSize(Width,Width), IPL_DEPTH_8U, 1 );
        //printf("Width is %d.\n", (int)Width);
        //printf("Sigma is %f.\n", Sigma);
        //printf("F is %f.\n", F);
        //printf("Phi is %f.\n", Phi);

        //pImage = gan_image_alloc_gl_d(Width, Width);
        pImage = cvCreateImage( cvSize(Width,Width), IPL_DEPTH_32F, 1 );

        CvMat* kernel = cvCreateMat(Width, Width, CV_32FC1);
        double ve;
        // CvScalar S;
        CvSize size = cvGetSize( kernel );
        int rows = size.height;
        int cols = size.width;

        switch(Type)
        {
        case 1:  // Real
            cvCopy( (CvMat*)Real, (CvMat*)kernel, NULL );
            //pImage = cvGetImage( (CvMat*)kernel, pImageGL );
            for (int i = 0; i < rows; i++)
            {
                for (int j = 0; j < cols; j++)
                {
                    ve = cvGetReal2D((CvMat*)kernel, i, j);
                    cvSetReal2D( (IplImage*)pImage, j, i, ve );
                }
            }
            break;
        case 2:  // Imag
            cvCopy( (CvMat*)Imag, (CvMat*)kernel, NULL );
            //pImage = cvGetImage( (CvMat*)kernel, pImageGL );
            for (int i = 0; i < rows; i++)
            {
                for (int j = 0; j < cols; j++)
                {
                    ve = cvGetReal2D((CvMat*)kernel, i, j);
                    cvSetReal2D( (IplImage*)pImage, j, i, ve );
                }
            }
            break;
        case 3:  // Magnitude
            ///@todo
            printf("[CvGabor::get_image] Error: No magnitude available.\n");
            break;
        case 4:  // Phase
            ///@todo
            printf("[CvGabor::get_image] Error: No phase available.\n");
            break;
        }

        cvNormalize((IplImage*)pImage, (IplImage*)pImage, 0, 255, CV_MINMAX, NULL );
        cvConvertScaleAbs( (IplImage*)pImage, (IplImage*)newimage, 1, 0 );

        cvReleaseMat(&kernel);
        cvReleaseImage(&pImage);
        return newimage;
    }
}
/**
 * @brief CvGabor::conv_img_a(IplImage *src, IplImage *dst, int Type)
 */
void CvGabor::conv_img_a(IplImage *src, IplImage *dst, int Type)   // convolve an image with the Gabor kernel
{
    double ve, re, im;

    int width  = src->width;
    int height = src->height;

    CvMat *mat = cvCreateMat(src->width, src->height, CV_32FC1);
    for (int i = 0; i < width; i++)        // copy the whole image into the (transposed) matrix
    {
        for (int j = 0; j < height; j++)
        {
            ve = cvGetReal2D((IplImage*)src, j, i);
            cvSetReal2D( (CvMat*)mat, i, j, ve );
        }
    }

    CvMat *rmat   = cvCreateMat(width, height, CV_32FC1);    // holds the real response
    CvMat *imat   = cvCreateMat(width, height, CV_32FC1);    // holds the imaginary response
    CvMat *kernel = cvCreateMat( Width, Width, CV_32FC1 );   // kernel window

    switch (Type)
    {
    case CV_GABOR_REAL:   // convolve with the real part
        cvCopy( (CvMat*)Real, (CvMat*)kernel, NULL );
        cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)kernel, cvPoint( (Width-1)/2, (Width-1)/2));
        break;
    case CV_GABOR_IMAG:   // convolve with the imaginary part
        cvCopy( (CvMat*)Imag, (CvMat*)kernel, NULL );
        cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)kernel, cvPoint( (Width-1)/2, (Width-1)/2));
        break;
    case CV_GABOR_MAG:    // convolve with both the real and imaginary parts
        /* Real Response */
        cvCopy( (CvMat*)Real, (CvMat*)kernel, NULL );
        cvFilter2D( (CvMat*)mat, (CvMat*)rmat, (CvMat*)kernel, cvPoint( (Width-1)/2, (Width-1)/2));
        /* Imag Response */
        cvCopy( (CvMat*)Imag, (CvMat*)kernel, NULL );
        cvFilter2D( (CvMat*)mat, (CvMat*)imat, (CvMat*)kernel, cvPoint( (Width-1)/2, (Width-1)/2));
        /* Magnitude response is the square root of the sum of the square of
           real response and imaginary response */
        for (int i = 0; i < width; i++)
        {
            for (int j = 0; j < height; j++)
            {
                re = cvGetReal2D((CvMat*)rmat, i, j);
                im = cvGetReal2D((CvMat*)imat, i, j);
                ve = sqrt(re*re + im*im);
                cvSetReal2D( (CvMat*)mat, i, j, ve );
            }
        }
        break;
    case CV_GABOR_PHASE:
        break;
    }

    if (dst->depth == IPL_DEPTH_8U)   // normalize to the 8-bit range
    {
        cvNormalize((CvMat*)mat, (CvMat*)mat, 0, 255, CV_MINMAX, NULL);
        for (int i = 0; i < width; i++)
        {
            for (int j = 0; j < height; j++)
            {
                ve = cvGetReal2D((CvMat*)mat, i, j);
                ve = cvRound(ve);
                cvSetReal2D( (IplImage*)dst, j, i, ve );
            }
        }
    }

    if (dst->depth == IPL_DEPTH_32F)
    {
        for (int i = 0; i < width; i++)
        {
            for (int j = 0; j < height; j++)
            {
                ve = cvGetReal2D((CvMat*)mat, i, j);
                cvSetReal2D( (IplImage*)dst, j, i, ve );
            }
        }
    }

    cvReleaseMat(&kernel);
    cvReleaseMat(&imat);
    cvReleaseMat(&rmat);
    cvReleaseMat(&mat);
}