void asef_initialze(AsefEyeLocator *asef, const char *file_name){
    load_asef_filters(file_name, &asef->n_rows, &asef->n_cols, &asef->lrect,
                      &asef->rrect, &asef->lfilter, &asef->rfilter);

    asef->lfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->image = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->image_tile = cvCreateMat(asef->n_rows, asef->n_cols, CV_8UC1);
    asef->lcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->lroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);

    cvDFT(asef->lfilter, asef->lfilter_dft, CV_DXT_FORWARD, 0);
    cvDFT(asef->rfilter, asef->rfilter_dft, CV_DXT_FORWARD, 0);

    cvGetSubRect(asef->lcorr, asef->lroi, asef->lrect);
    cvGetSubRect(asef->rcorr, asef->rroi, asef->rrect);

    asef->lut = cvCreateMat(256, 1, CV_32FC1);
    for (int i = 0; i < 256; i++){
        cvmSet(asef->lut, i, 0, 1.0 + i);
    }
    cvLog(asef->lut, asef->lut);
}
/// <summary>
/// Given an image of a single character and its bounding box, resizes it to
/// new_width x new_height and classifies it with the k-nearest algorithm.
/// If printResult is non-zero, the result is printed.
/// </summary>
/// <param name="imgSrc"> Source image to be processed. </param>
/// <param name="new_width"> Width to which the image is resized before running the k-nearest algorithm. </param>
/// <param name="new_height"> Height to which the image is resized before running the k-nearest algorithm. </param>
/// <param name="printResult"> If non-zero, the result is printed after running the k-nearest algorithm. </param>
/// <param name="bb"> Bounding box of the character within imgSrc. </param>
/// <returns> Result of classifying the image. </returns>
float OCR::process(IplImage* imgSrc, int new_width, int new_height, int printResult, CvRect bb)
{
    IplImage* result;
    IplImage* scaledResult;
    CvMat data;
    CvMat dataA;
    CvRect bba; // bounding box maintaining aspect ratio

    // Get the bounding box data; with aspect ratio preserved the x and y can be corrupted
    cvGetSubRect(imgSrc, &data, cvRect(bb.x, bb.y, bb.width, bb.height));

    // Create a square image (aspect ratio 1) whose side is the larger of the
    // bounding box width and height
    int size = (bb.width > bb.height) ? bb.width : bb.height;
    result = cvCreateImage( cvSize( size, size ), 8, 1 );
    cvSet(result, CV_RGB(255, 255, 255), NULL);

    // Copy the data to the center of the image
    int x = (int)floor((float)(size - bb.width) / 2.0f);
    int y = (int)floor((float)(size - bb.height) / 2.0f);

    // Get the center of the result into dataA
    cvGetSubRect(result, &dataA, cvRect(x, y, bb.width, bb.height));
    cvCopy(&data, &dataA, NULL);

    // Scale the result
    scaledResult = cvCreateImage( cvSize( new_width, new_height ), 8, 1 );
    cvResize(result, scaledResult, CV_INTER_NN);

    // Return the processed data
    return print(*scaledResult, printResult);
}
int main(int argc, char** argv)
{
    int M1 = 2, N1 = 2;
    int M2 = 2, N2 = 2;

    // initialize A and B
    CvMat* A = cvCreateMat(M1, N1, CV_32F);
    CvMat* B = cvCreateMat(M2, N2, A->type);

    // it is also possible to compute only the abs(M2-M1)+1 x abs(N2-N1)+1
    // part of the full convolution result
    CvMat* conv = cvCreateMat(A->rows + B->rows - 1, A->cols + B->cols - 1, A->type);

    int dft_M = cvGetOptimalDFTSize(A->rows + B->rows - 1);
    int dft_N = cvGetOptimalDFTSize(A->cols + B->cols - 1);

    CvMat* dft_A = cvCreateMat(dft_M, dft_N, A->type);
    CvMat* dft_B = cvCreateMat(dft_M, dft_N, B->type);
    CvMat tmp;

    // copy A to dft_A and pad dft_A with zeros
    cvGetSubRect(dft_A, &tmp, cvRect(0, 0, A->cols, A->rows));
    cvCopy(A, &tmp);
    cvGetSubRect(dft_A, &tmp, cvRect(A->cols, 0, dft_A->cols - A->cols, A->rows));
    cvZero(&tmp);

    // no need to pad the bottom part of dft_A with zeros because of the
    // nonzero_rows parameter in the cvDFT() call below
    cvDFT(dft_A, dft_A, CV_DXT_FORWARD, A->rows);

    // repeat the same with the second array
    cvGetSubRect(dft_B, &tmp, cvRect(0, 0, B->cols, B->rows));
    cvCopy(B, &tmp);
    cvGetSubRect(dft_B, &tmp, cvRect(B->cols, 0, dft_B->cols - B->cols, B->rows));
    cvZero(&tmp);

    // no need to pad the bottom part of dft_B with zeros because of the
    // nonzero_rows parameter in the cvDFT() call below
    cvDFT(dft_B, dft_B, CV_DXT_FORWARD, B->rows);

    // or CV_DXT_MUL_CONJ to get correlation rather than convolution
    cvMulSpectrums(dft_A, dft_B, dft_A, 0);

    // calculate only the top part
    cvDFT(dft_A, dft_A, CV_DXT_INV_SCALE, conv->rows);
    cvGetSubRect(dft_A, &tmp, cvRect(0, 0, conv->cols, conv->rows));
    cvCopy(&tmp, conv);

    return 0;
}
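The "initialize A and B" step in the sample above is left empty. Below is a minimal sketch of one way to fill the two inputs and inspect the resulting full convolution, assuming the OpenCV 1.x C API; the helpers fill_inputs and print_mat are hypothetical and not part of the original sample.

#include <stdio.h>
#include <opencv/cv.h>

/* Hypothetical helpers for exercising the DFT-based convolution above. */
static void fill_inputs(CvMat* A, CvMat* B)
{
    /* A = [1 2; 3 4], B = a 2x2 box of ones, so conv becomes a sliding sum of A. */
    cvmSet(A, 0, 0, 1); cvmSet(A, 0, 1, 2);
    cvmSet(A, 1, 0, 3); cvmSet(A, 1, 1, 4);
    cvSet(B, cvRealScalar(1), NULL);
}

static void print_mat(const CvMat* m)
{
    for (int y = 0; y < m->rows; y++) {
        for (int x = 0; x < m->cols; x++)
            printf("%8.2f ", cvmGet(m, y, x));
        printf("\n");
    }
}

With these helpers, fill_inputs(A, B) would go under the "initialize A and B" comment and print_mat(conv) just before the return.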
int asef_initialze(AsefEyeLocator *asef, const char *asef_file_name, const char *fd_file_name){
    if ( !asef || !asef_file_name || !fd_file_name ||
         strlen(asef_file_name) == 0 || strlen(fd_file_name) == 0)
        return -1;

    // For face detection:
    asef->face_detection_buffer = cvCreateMemStorage(0);
    asef->face_detection_classifier = fd_load_detector( fd_file_name );
    if ( !asef->face_detection_classifier )
        return -1;

    // For asef eye locator:
    if ( load_asef_filters(asef_file_name, &asef->n_rows, &asef->n_cols, &asef->lrect,
                           &asef->rrect, &asef->lfilter, &asef->rfilter) )
        return -1;

    asef->lfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->scaled_face_image_32fc1 = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->scaled_face_image_8uc1 = cvCreateMat(asef->n_rows, asef->n_cols, CV_8UC1);
    asef->lcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->lroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->lut = cvCreateMat(256, 1, CV_32FC1);

    if ( !(asef->lfilter_dft && asef->rfilter_dft &&
           asef->scaled_face_image_32fc1 && asef->scaled_face_image_8uc1 &&
           asef->lcorr && asef->rcorr && asef->lroi && asef->rroi && asef->lut) ){
        return -1;
    }

    cvDFT(asef->lfilter, asef->lfilter_dft, CV_DXT_FORWARD, 0);
    cvDFT(asef->rfilter, asef->rfilter_dft, CV_DXT_FORWARD, 0);

    cvGetSubRect(asef->lcorr, asef->lroi, asef->lrect);
    cvGetSubRect(asef->rcorr, asef->rroi, asef->rrect);

    for (int i = 0; i < 256; i++){
        cvmSet(asef->lut, i, 0, 1.0 + i);
    }
    cvLog(asef->lut, asef->lut);

    return 0;
}
////////////////////////// perform Fourier transform //////////////////////////
// fft2
// code comes from http://www.opencv.org.cn/
void CLightSet::fft2(IplImage *src, CvMat *dst)
{
    IplImage * realInput;
    IplImage * imaginaryInput;
    IplImage * complexInput;

    int dft_M, dft_N;
    CvMat* dft_A, tmp;
    IplImage * image_Re;
    IplImage * image_Im;

    realInput = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1);
    imaginaryInput = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1);
    complexInput = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 2);

    cvScale(src, realInput, 1.0, 0.0);
    cvZero(imaginaryInput);
    cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

    // dft_M = cvGetOptimalDFTSize( src->height - 1 );
    // dft_N = cvGetOptimalDFTSize( src->width - 1 );
    dft_M = src->height;
    dft_N = src->width;

    dft_A = cvCreateMat( dft_M, dft_N, CV_32FC2 );
    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_32F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_32F, 1);

    // copy A to dft_A and pad dft_A with zeros
    cvGetSubRect( dft_A, &tmp, cvRect(0, 0, src->width, src->height));
    cvCopy( complexInput, &tmp, NULL );
    if( dft_A->cols > src->width )
    {
        cvGetSubRect( dft_A, &tmp, cvRect(src->width, 0, dft_A->cols - src->width, src->height));
        cvZero( &tmp );
    }

    // no need to pad the bottom part of dft_A with zeros because of the
    // nonzero_rows parameter in the cvDFT() call below
    cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

    cvCopy(dft_A, dst);

    cvReleaseMat(&dft_A);   // release the temporary DFT buffer (missing in the original)
    cvReleaseImage(&realInput);
    cvReleaseImage(&imaginaryInput);
    cvReleaseImage(&complexInput);
    cvReleaseImage(&image_Re);
    cvReleaseImage(&image_Im);
}
// Rearrange the quadrants of the Fourier image so that the origin is at
// the image center.
// src & dst arrays must be of equal size & type.
void cvShiftDFT(CvArr *src_arr, CvArr *dst_arr )
{
    CvMat *tmp = NULL;
    CvMat q1stub, q2stub;
    CvMat q3stub, q4stub;
    CvMat d1stub, d2stub;
    CvMat d3stub, d4stub;
    CvMat *q1, *q2, *q3, *q4;
    CvMat *d1, *d2, *d3, *d4;

    CvSize size = cvGetSize(src_arr);
    CvSize dst_size = cvGetSize(dst_arr);
    int cx, cy;

    if(dst_size.width != size.width || dst_size.height != size.height){
        cvError( CV_StsUnmatchedSizes, "cvShiftDFT",
                 "Source and Destination arrays must have equal sizes",
                 __FILE__, __LINE__ );
    }

    // the in-place shift needs a temporary quadrant buffer
    if(src_arr == dst_arr){
        tmp = rb_cvCreateMat(size.height/2, size.width/2, cvGetElemType(src_arr));
    }

    cx = size.width/2;
    cy = size.height/2; // image center

    q1 = cvGetSubRect( src_arr, &q1stub, cvRect(0,  0,  cx, cy) );
    q2 = cvGetSubRect( src_arr, &q2stub, cvRect(cx, 0,  cx, cy) );
    q3 = cvGetSubRect( src_arr, &q3stub, cvRect(cx, cy, cx, cy) );
    q4 = cvGetSubRect( src_arr, &q4stub, cvRect(0,  cy, cx, cy) );
    // destination quadrants must come from dst_arr (the original took them from src_arr,
    // which left dst untouched in the out-of-place case)
    d1 = cvGetSubRect( dst_arr, &d1stub, cvRect(0,  0,  cx, cy) );
    d2 = cvGetSubRect( dst_arr, &d2stub, cvRect(cx, 0,  cx, cy) );
    d3 = cvGetSubRect( dst_arr, &d3stub, cvRect(cx, cy, cx, cy) );
    d4 = cvGetSubRect( dst_arr, &d4stub, cvRect(0,  cy, cx, cy) );

    if(src_arr != dst_arr){
        if( !CV_ARE_TYPES_EQ( q1, d1 )){
            cvError( CV_StsUnmatchedFormats, "cvShiftDFT",
                     "Source and Destination arrays must have the same format",
                     __FILE__, __LINE__ );
        }
        cvCopy(q3, d1, 0);
        cvCopy(q4, d2, 0);
        cvCopy(q1, d3, 0);
        cvCopy(q2, d4, 0);
    }
    else{
        cvCopy(q3, tmp, 0);
        cvCopy(q1, q3, 0);
        cvCopy(tmp, q1, 0);
        cvCopy(q4, tmp, 0);
        cvCopy(q2, q4, 0);
        cvCopy(tmp, q2, 0);
    }

    if (tmp != NULL) {
        cvReleaseMat(&tmp);
    }
}
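A short usage sketch, not part of the original code: calling cvShiftDFT() in place on a complex forward-DFT result so the zero-frequency term sits at the image center, then splitting it into real and imaginary planes for display or further processing. The CV_64FC2 spectrum layout is an assumption borrowed from the fftImage() snippet later in this collection.

#include <opencv/cv.h>

/* Sketch: center an already-computed complex spectrum (CV_64FC2) and
   split it into single-channel real/imaginary planes of the same size. */
static void center_and_split(CvMat* spectrum, CvMat* re, CvMat* im)
{
    cvShiftDFT(spectrum, spectrum);        /* in-place quadrant swap */
    cvSplit(spectrum, re, im, NULL, NULL);
}

Note that for odd-sized spectra the last row and column stay in place, since cx and cy are computed with integer division.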
void GenerateAffineTransformFromPose(CvSize size, CvAffinePose pose, CvMat* transform)
{
    CvMat* temp = cvCreateMat(3, 3, CV_32FC1);
    CvMat* final = cvCreateMat(3, 3, CV_32FC1);
    cvmSet(temp, 2, 0, 0.0f);
    cvmSet(temp, 2, 1, 0.0f);
    cvmSet(temp, 2, 2, 1.0f);

    CvMat rotation;
    cvGetSubRect(temp, &rotation, cvRect(0, 0, 3, 2));

    cv2DRotationMatrix(cvPoint2D32f(size.width/2, size.height/2), pose.phi, 1.0, &rotation);
    cvCopy(temp, final);

    cvmSet(temp, 0, 0, pose.lambda1);
    cvmSet(temp, 0, 1, 0.0f);
    cvmSet(temp, 1, 0, 0.0f);
    cvmSet(temp, 1, 1, pose.lambda2);
    cvmSet(temp, 0, 2, size.width/2*(1 - pose.lambda1));
    cvmSet(temp, 1, 2, size.height/2*(1 - pose.lambda2));
    cvMatMul(temp, final, final);

    cv2DRotationMatrix(cvPoint2D32f(size.width/2, size.height/2), pose.theta - pose.phi, 1.0, &rotation);
    cvMatMul(temp, final, final);

    cvGetSubRect(final, &rotation, cvRect(0, 0, 3, 2));
    cvCopy(&rotation, transform);

    cvReleaseMat(&temp);
    cvReleaseMat(&final);
}
int grow_mat::reserve(int maxrows, int maxcols)
{
    if (_mat == 0 || maxrows < 0 || maxcols < 0)
        return 0;

    // Only allocate memory when the requested rows or columns exceed the current matrix
    if (maxrows > _mat->rows || maxcols > _mat->cols) {
        // new matrix
        CvMat * nm = cvCreateMat(maxrows, maxcols, _mat->type);
        if (nm == 0)
            return 0;
        if (_zero_flag)
            cvSetZero(nm);

        if (this->rows && this->cols && _copy_flag) {
            // If the current matrix is not empty and copying is requested,
            // copy the data of this matrix into nm
            CvMat sub;
            cvGetSubRect(nm, &sub, cvRect(0, 0, this->cols, this->rows));
            cvCopy(this, &sub);
        }
        cvReleaseMat(&_mat);
        _mat = nm;
    }
    return 1;
}
static float CalcAverageMask(CvBlob* pBlob, IplImage* pImgFG )
{   /* Calculate sum of mask: */
    double Area, Aver = 0;
    CvRect r;
    CvMat mat;

    if(pImgFG == NULL) return 0;

    r.x = cvRound(pBlob->x - pBlob->w*0.5);
    r.y = cvRound(pBlob->y - pBlob->h*0.5);
    r.width  = cvRound(pBlob->w);
    r.height = cvRound(pBlob->h);
    Area = r.width * r.height;

    /* clip the rectangle to the image borders */
    if(r.x < 0){r.width += r.x; r.x = 0;}
    if(r.y < 0){r.height += r.y; r.y = 0;}
    if((r.x + r.width)  >= pImgFG->width) {r.width  = pImgFG->width  - r.x - 1;}
    if((r.y + r.height) >= pImgFG->height){r.height = pImgFG->height - r.y - 1;}

    if(r.width > 0 && r.height > 0)
    {
        double Sum = cvSum(cvGetSubRect(pImgFG, &mat, r)).val[0] / 255.0;
        assert(Area > 0);
        Aver = Sum / Area;
    }
    return (float)Aver;
}   /* Calculate sum of mask. */
// The second parameter mainly controls whether we are testing training samples
// or recognizing handwritten input.
float basicOCR::classify(IplImage* img, int showResult)
{
    IplImage prs_image;
    CvMat data;
    CvMat* nearest = cvCreateMat(1, K, CV_32FC1);
    float result;

    // preprocess the input image
    prs_image = preprocessing(img, size, size);

    // Set data
    IplImage* img32 = cvCreateImage( cvSize( size, size ), IPL_DEPTH_32F, 1 );
    cvConvertScale(&prs_image, img32, 0.0039215, 0);
    cvGetSubRect(img32, &data, cvRect(0, 0, size, size));
    CvMat row_header, *row1;
    row1 = cvReshape( &data, &row_header, 0, 1 );

    result = knn->find_nearest(row1, K, 0, 0, nearest, 0);

    int accuracy = 0;
    for(int i = 0; i < K; i++)
    {
        if( (nearest->data.fl[i]) == result)
            accuracy++;
    }
    float pre = 100 * ((float)accuracy / (float)K);
    char out = result;
    if(showResult == 1)
    {
        printf("|\t %c \t| \t %.2f%% \t| \t %d of %d \t| \n", out, pre, accuracy, K);
        printf(" ------------------------------------------------------------------------\n");
    }

    return result;
}
void asef_locate_eyes(AsefEyeLocator *asef, IplImage *image, CvRect face_rect, CvPoint *leye, CvPoint *reye){
    asef->face_img.cols = face_rect.width;
    asef->face_img.rows = face_rect.height;
    asef->face_img.type = CV_8UC1;
    asef->face_img.step = face_rect.width;

    cvGetSubRect(image, &asef->face_img, face_rect);

    double xscale = ((double)asef->image_tile->cols)/((double)asef->face_img.cols);
    double yscale = ((double)asef->image_tile->rows)/((double)asef->face_img.rows);

    cvResize(&asef->face_img, asef->image_tile, CV_INTER_LINEAR);
    cvLUT(asef->image_tile, asef->image, asef->lut);
    cvDFT(asef->image, asef->image, CV_DXT_FORWARD, 0);

    cvMulSpectrums(asef->image, asef->lfilter_dft, asef->lcorr, CV_DXT_MUL_CONJ);
    cvMulSpectrums(asef->image, asef->rfilter_dft, asef->rcorr, CV_DXT_MUL_CONJ);

    cvDFT(asef->lcorr, asef->lcorr, CV_DXT_INV_SCALE, 0);
    cvDFT(asef->rcorr, asef->rcorr, CV_DXT_INV_SCALE, 0);

    cvMinMaxLoc(asef->lroi, NULL, NULL, NULL, leye, NULL);
    cvMinMaxLoc(asef->rroi, NULL, NULL, NULL, reye, NULL);

    leye->x = (asef->lrect.x + leye->x)/xscale + face_rect.x;
    leye->y = (asef->lrect.y + leye->y)/yscale + face_rect.y;

    reye->x = (asef->rrect.x + reye->x)/xscale + face_rect.x;
    reye->y = (asef->rrect.y + reye->y)/yscale + face_rect.y;
}
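A hedged sketch of how the two ASEF entry points in this collection appear meant to be combined: asef_initialze() from the first snippet builds the filter DFTs, LUT and correlation ROIs, and asef_locate_eyes() above maps the correlation peaks back into image coordinates. The filter file name and the face rectangle (normally produced by a Haar cascade detector) are placeholders.

#include <stdio.h>
#include <opencv/cv.h>

/* Sketch only: locate both eyes inside an already-detected face rectangle
   of a single-channel 8-bit image. */
static void locate_eyes_in_face(const char *filter_file, IplImage *gray, CvRect face_rect)
{
    AsefEyeLocator asef;
    CvPoint leye, reye;

    asef_initialze(&asef, filter_file);                       /* load filters, DFTs, LUT */
    asef_locate_eyes(&asef, gray, face_rect, &leye, &reye);   /* eye centers in image coords */

    printf("left eye (%d, %d), right eye (%d, %d)\n", leye.x, leye.y, reye.x, reye.y);
}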
int grow_mat::init(int r, int c, int type, int maxrows, int maxcols)
{
    int no_max = 0;
    if (maxrows == 0) {maxrows = r * _expand_factor; no_max = 1;}
    if (maxcols == 0) {maxcols = c * _expand_factor; no_max = 1;}
    if (type == 0) type = CV_64FC1;
    if (r <= 0 || c <= 0 || maxrows < 0 || maxcols < 0 || r > maxrows || c > maxcols)
        return 0;

    // To stay consistent with mini_solver's set_solver, re-initialization is allowed.
    if (_mat) {
        // If _mat already exists with the same type, the caller just wants to resize.
        int mat_type = CV_MAT_TYPE(_mat->type);
        if (CV_MAT_TYPE(type) == mat_type) {
            return resize(r, c);
        }
    }
    if (_mat)
        cvReleaseMat(&_mat);

    if (no_max)
        _mat = cvCreateMat(r, c, type);
    else
        _mat = cvCreateMat(maxrows, maxcols, type);
    if (_mat == 0)
        return 0;
    if (_zero_flag)
        cvSetZero(_mat);

    cvGetSubRect(_mat, this, cvRect(0, 0, c, r));
    return 1;
}
void asef_locate_eyes(AsefEyeLocator *asef){
    asef->face_image.cols = asef->face_rect.width;
    asef->face_image.rows = asef->face_rect.height;
    asef->face_image.type = CV_8UC1;
    asef->face_image.step = asef->face_rect.width;

    cvGetSubRect(asef->input_image, &asef->face_image, asef->face_rect);

    double xscale = ((double)asef->scaled_face_image_8uc1->cols)/((double)asef->face_image.cols);
    double yscale = ((double)asef->scaled_face_image_8uc1->rows)/((double)asef->face_image.rows);

    cvResize(&asef->face_image, asef->scaled_face_image_8uc1, CV_INTER_LINEAR);
    cvLUT(asef->scaled_face_image_8uc1, asef->scaled_face_image_32fc1, asef->lut);
    cvDFT(asef->scaled_face_image_32fc1, asef->scaled_face_image_32fc1, CV_DXT_FORWARD, 0);

    cvMulSpectrums(asef->scaled_face_image_32fc1, asef->lfilter_dft, asef->lcorr, CV_DXT_MUL_CONJ);
    cvMulSpectrums(asef->scaled_face_image_32fc1, asef->rfilter_dft, asef->rcorr, CV_DXT_MUL_CONJ);

    cvDFT(asef->lcorr, asef->lcorr, CV_DXT_INV_SCALE, 0);
    cvDFT(asef->rcorr, asef->rcorr, CV_DXT_INV_SCALE, 0);

    cvMinMaxLoc(asef->lroi, NULL, NULL, NULL, &asef->left_eye, NULL);
    cvMinMaxLoc(asef->rroi, NULL, NULL, NULL, &asef->right_eye, NULL);

    asef->left_eye.x = (asef->lrect.x + asef->left_eye.x)/xscale + asef->face_rect.x;
    asef->left_eye.y = (asef->lrect.y + asef->left_eye.y)/yscale + asef->face_rect.y;

    asef->right_eye.x = (asef->rrect.x + asef->right_eye.x)/xscale + asef->face_rect.x;
    asef->right_eye.y = (asef->rrect.y + asef->right_eye.y)/yscale + asef->face_rect.y;
}
/// <summary>
/// Classifies the given prs_image by running the k-nearest algorithm and optionally prints the result.
/// </summary>
/// <param name="prs_image"> IplImage to be classified. </param>
/// <param name="showResult"> If it is 1, the result is printed after classifying. </param>
/// <returns> Result of classifying the given image. </returns>
float OCR::print(IplImage prs_image, int showResult)
{
    float result;
    CvMat data;
    CvMat* nearest = cvCreateMat(1, K, CV_32FC1);

    //Set data
    IplImage* img32 = cvCreateImage( cvSize( size, size ), IPL_DEPTH_32F, 1 );
    cvConvertScale(&prs_image, img32, 0.0039215, 0);
    cvGetSubRect(img32, &data, cvRect(0, 0, size, size));
    CvMat row_header, *row1;
    row1 = cvReshape( &data, &row_header, 0, 1 );

    result = knn->find_nearest(row1, K, 0, 0, nearest, 0);

    if(showResult == 1)
    {
        char r = result;
        int accuracy = 0;
        for(int i = 0; i < K; i++)
        {
            if( nearest->data.fl[i] == result)
                accuracy++;
        }
        printf("%c ", r);
        // float pre = 100*((float)accuracy/(float)K);
        // printf("|\t%c\t| \t%.2f%% \t| \t%d of %d \t",r,pre,accuracy,K);
        // printf(" \n---------------------------------------------------------------\n");
    }
    return result;
}
void event_mouse(int button, int state, int x, int y)
{
    int err;

    if (state == GLUT_DOWN) {
        // Mouse down = find template.
        // Do a Hough transform to find the ball center and radius.
        struct frame *fr = get_frame(active_window->frames, active_window->cur);
        err = houghTransform(active_window, active_window->cur, x, y);
        if (err) {
            return;
        }
        fr->flag |= HAS_HOUGH;

        active_window->guess.x = fr->hough.x;
        active_window->guess.y = fr->hough.y;

        // Make a subimage containing the template
        CvRect r = cvRect(fr->hough.x - fr->hough.radius, fr->hough.y - fr->hough.radius,
                          fr->hough.radius*2, fr->hough.radius*2);
        CvMat *sub = cvCreateMatHeader(fr->hough.radius*2, fr->hough.radius*2, CV_32FC1);
        cvGetSubRect(fr->image, sub, r);
        active_window->tmpl = sub;

        // Match (could be left out)
        templateMatch(active_window, active_window->cur, MARGIN, sub);
        fr->flag |= HAS_MATCH;
        glutPostRedisplay();

        // Calculate meters per pixel
        active_window->mpp = atof(active_window->argv[2]) / (fr->hough.radius*2);
        printf("Getting mpp: %f/%f = %f\n", atof(active_window->argv[2]),
               (double)(fr->hough.radius*2), active_window->mpp);
    }
}
IplImage* cvGetImageSubRect(IplImage* image, CvRect* rect)
{
    IplImage* res = cvCreateImageHeader(cvSize(rect->width, rect->height),
                                        image->depth, image->nChannels);
    CvMat mat;
    cvGetSubRect(image, &mat, *rect);
    cvGetImage(&mat, res);
    return res;
}
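A usage note, not from the original code: the IplImage returned by cvGetImageSubRect() is only a header over the source image's pixels (no copy is made), so it should be released with cvReleaseImageHeader() rather than cvReleaseImage(). A minimal sketch, assuming a grayscale file named input.png that is larger than the requested rectangle:

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    IplImage* img = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (!img) return -1;

    CvRect rect = cvRect(10, 10, 64, 64);
    IplImage* sub = cvGetImageSubRect(img, &rect);   /* shares pixel data with img */

    cvNamedWindow("sub", CV_WINDOW_AUTOSIZE);
    cvShowImage("sub", sub);
    cvWaitKey(0);

    cvReleaseImageHeader(&sub);   /* header only; the pixels belong to img */
    cvReleaseImage(&img);
    return 0;
}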
/*************************************************************************
 * @function:
 *     calMISSIM()
 * @inputs:
 *     const IplImage* image1 - input image 1
 *     const IplImage* image2 - input image 2
 *     int n                  - size of each block
 * @return:
 *     double MISSIM - mean improved structural similarity of the two images
 * @description:
 *     Computes the mean improved structural similarity (MISSIM) of two images.
 **************************************************************************/
double calMISSIM(const IplImage* image1, const IplImage* image2, int n)
{
    double MISSIM = 0;
    int i, j, k;
    int row1 = image1->height;
    int col1 = image1->width;
    int row2 = image2->height;
    int col2 = image2->width;
    if (row1 != row2 || col1 != col2)
    {
        printf("Size can't match in calMISSIM()!!");
    }

    int nr = cvFloor(row1 / n);
    int nc = cvFloor(col1 / n);
    int N = nr * nc;
    double ISSIM = 0;
    double sum = 0;
    CvMat tmp1;
    CvMat tmp2;
    IplImage* temp1 = cvCreateImage(cvSize(n, n), image1->depth, image1->nChannels);
    IplImage* temp2 = cvCreateImage(cvSize(n, n), image1->depth, image1->nChannels);

    for (i = 0, k = 0; i < nr; i++)
    {
        for (j = 0; j < nc; j++, k++)
        {
            cvGetSubRect(image1, &tmp1, cvRect(j*n, i*n, n, n));
            cvGetSubRect(image2, &tmp2, cvRect(j*n, i*n, n, n));
            cvScale(&tmp1, temp1, 1, 0);
            cvScale(&tmp2, temp2, 1, 0);
            ISSIM = calISSIM(temp1, temp2);
            sum += ISSIM;
        }
    }
    MISSIM = sum / N;

    cvReleaseImage(&temp1);
    cvReleaseImage(&temp2);
    return MISSIM;
}
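A brief usage sketch, not part of the original code: comparing two equally sized grayscale images block-wise with calMISSIM(). The file names and the 8x8 block size are placeholders; calISSIM() is assumed to be provided alongside calMISSIM().

#include <stdio.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    IplImage* a = cvLoadImage("reference.png", CV_LOAD_IMAGE_GRAYSCALE);
    IplImage* b = cvLoadImage("distorted.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (!a || !b) return -1;

    /* mean improved SSIM over 8x8 blocks */
    double missim = calMISSIM(a, b, 8);
    printf("MISSIM = %f\n", missim);

    cvReleaseImage(&a);
    cvReleaseImage(&b);
    return 0;
}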
int grow_mat::resize(int r, int c)
{
    if (_mat == 0 || r < 0 || c < 0)
        return 0;

    if (r <= _mat->rows && c <= _mat->cols) {
        cvGetSubRect(_mat, this, cvRect(0, 0, c, r));
    }
    else {
        // Grow the backing matrix to _expand_factor times the requested size
        int maxrows = (r > _mat->rows ? r * _expand_factor : _mat->rows);
        int maxcols = (c > _mat->cols ? c * _expand_factor : _mat->cols);
        reserve(maxrows, maxcols);
        cvGetSubRect(_mat, this, cvRect(0, 0, c, r));
    }
    return 1;
}
/// <summary>
/// Reads the sample images and their associated characters into trainData and trainClasses respectively.
/// </summary>
/// <returns> Nothing. </returns>
void OCR::getData()
{
    IplImage* src_image;
    IplImage prs_image;
    CvMat row, data;
    char file[255];
    char dataFile[255];
    std::ifstream labelStream;
    std::ostringstream outStringStream;
    char ch;

    int i, j;
    for(i = 0; i < classes; i++) // 26 classes
    {
        // Read the character corresponding to the current sample into ch.
        sprintf(dataFile, "%s%d/data.txt", file_path, i);
        labelStream.open(dataFile);
        labelStream >> ch;
        labelStream.close();

        for( j = 0; j < train_samples; j++) // 3 samples per class
        {
            // Build the path of the training image in file.
            if(j < 10)
                sprintf(file, "%s%d/%d0%d.pbm", file_path, i, i, j);
            else
                sprintf(file, "%s%d/%d%d.pbm", file_path, i, i, j);
            src_image = cvLoadImage(file, 0);
            if(!src_image)
            {
                printf("Error: Cant load image %s\n", file);
                //exit(-1);
            }
            // process file
            prs_image = preprocessing(src_image, size, size);

            // Set class label
            cvGetRow(trainClasses, &row, i*train_samples + j);
            cvSet(&row, cvRealScalar(ch));
            // Set data
            cvGetRow(trainData, &row, i*train_samples + j);

            IplImage* img = cvCreateImage( cvSize( size, size ), IPL_DEPTH_32F, 1 );
            // convert 8-bit image to 32-bit float image
            cvConvertScale(&prs_image, img, 0.0039215, 0);

            cvGetSubRect(img, &data, cvRect(0, 0, size, size));

            CvMat row_header, *row1;
            // convert the size x size data matrix to a row vector
            row1 = cvReshape( &data, &row_header, 0, 1 );
            cvCopy(row1, &row, NULL);
        }
    }
}
int ASEF_Algorithm::initialize()
{
    if (load_asef_filters(haar_cascade_path, &n_rows, &n_cols, &lrect, &rrect, &lfilter, &rfilter))
        return -1;

    lfilter_dft = cvCreateMat(n_rows, n_cols, CV_32FC1);
    rfilter_dft = cvCreateMat(n_rows, n_cols, CV_32FC1);
    scaled_face_image_32fc1 = cvCreateMat(n_rows, n_cols, CV_32FC1);
    scaled_face_image_8uc1 = cvCreateMat(n_rows, n_cols, CV_8UC1);
    lcorr = cvCreateMat(n_rows, n_cols, CV_32FC1);
    rcorr = cvCreateMat(n_rows, n_cols, CV_32FC1);
    lroi = cvCreateMatHeader(n_rows, n_cols, CV_32FC1);
    rroi = cvCreateMatHeader(n_rows, n_cols, CV_32FC1);
    lut = cvCreateMat(256, 1, CV_32FC1);
    point = cvCreateMat(1, 2, CV_32FC1);

    if (!(lfilter_dft && rfilter_dft && scaled_face_image_32fc1 && scaled_face_image_8uc1 &&
          lcorr && rcorr && lroi && rroi && lut)) {
        return -1;
    }

    cvDFT(lfilter, lfilter_dft, CV_DXT_FORWARD, 0);
    cvDFT(rfilter, rfilter_dft, CV_DXT_FORWARD, 0);

    cvGetSubRect(lcorr, lroi, lrect);
    cvGetSubRect(rcorr, rroi, rrect);

    for (int i = 0; i < 256; i++) {
        cvmSet(lut, i, 0, 1.0 + i);
    }
    cvLog(lut, lut);

    isInitialized = true;
    return 0;
}
IplImage* fftImage(IplImage *img)
{
    IplImage *realpart, *imgpart, *complexpart, *ret;
    CvMat *ft;
    int sizeM, sizeN;
    CvMat tmp;

    realpart = cvCreateImage(cvGetSize(img), IPL_DEPTH_64F, 1);
    imgpart = cvCreateImage(cvGetSize(img), IPL_DEPTH_64F, 1);
    complexpart = cvCreateImage(cvGetSize(img), IPL_DEPTH_64F, 2);

    cvScale(img, realpart, 1.0, 0.0);                     // copy grey input image to realpart
    cvZero(imgpart);                                      // set imaginary part to 0
    cvMerge(realpart, imgpart, NULL, NULL, complexpart);  // real + imag to complex

    /* Messy bit - the DFT is fastest for the sizes returned by cvGetOptimalDFTSize(),
       so the image is embedded into a background of 0 pixels. */
    sizeM = cvGetOptimalDFTSize(img->height - 1);
    sizeN = cvGetOptimalDFTSize(img->width - 1);
    printf("Size of image to be transformed is %dx%d\n", sizeM, sizeN);
    ft = cvCreateMat(sizeM, sizeN, CV_64FC2);
    origin_center(complexpart);

    // copy the complex image into ft and pad ft with zeros
    cvGetSubRect(ft, &tmp, cvRect(0, 0, img->width, img->height));   // tmp points to a sub-rect of ft
    cvCopy(complexpart, &tmp, NULL);                                  // copy complex image into that sub-rect
    if ((ft->cols - img->width) > 0)                                  // set the right margin to zero (if any)
    {
        cvGetSubRect(ft, &tmp, cvRect(img->width, 0, ft->cols - img->width, img->height));
        cvZero(&tmp);
    }

    cvDFT(ft, ft, CV_DXT_FORWARD, complexpart->height);

    ret = cvMatToImage(ft);
    cvReleaseMat(&ft);
    cvReleaseImage(&realpart);
    cvReleaseImage(&imgpart);
    cvReleaseImage(&complexpart);
    return ret;
}
//! Apply to a matrix of points
void AffineTransform::applyToPoints(const CvMat * positions, CvMat * newPositions) const
{
    CvMat newPositions2d;
    cvGetSubRect(newPositions, &newPositions2d, cvRect(0, 0, newPositions->cols, 2));
    cvMatMul(*this, positions, &newPositions2d);

    for(int i = 0; i < newPositions->cols; i++)
    {
        cvmSet(newPositions, 0, i, cvmGet(&newPositions2d, 0, i));
        cvmSet(newPositions, 1, i, cvmGet(&newPositions2d, 1, i));
        cvmSet(newPositions, 2, i, 1.0);
    }
}
grow_mat::grow_mat(int r, int c, int type, void* data, int step)
{
    clear_all();
    if (data) {
        CvMat temp_mat;
        cvInitMatHeader(&temp_mat, r, c, type, data, step);
        _mat = cvCreateMat(r, c, type);
        cvCopy(&temp_mat, _mat);
        cvGetSubRect(_mat, this, cvRect(0, 0, c, r));
        return;
    }
    init(r, c, type, 0, 0);
}
void CvOneWayDescriptor::ProjectPCASample(IplImage* patch, CvMat* avg, CvMat* eigenvectors, CvMat* pca_coeffs) const
{
    CvMat* patch_mat = ConvertImageToMatrix(patch);
    // CvMat eigenvectorsr;
    // cvGetSubRect(eigenvectors, &eigenvectorsr, cvRect(0, 0, eigenvectors->cols, pca_coeffs->cols));
    CvMat* temp = cvCreateMat(1, eigenvectors->cols, CV_32FC1);
    cvProjectPCA(patch_mat, avg, eigenvectors, temp);
    CvMat temp1;
    cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
    cvCopy(&temp1, pca_coeffs);

    cvReleaseMat(&temp);
    cvReleaseMat(&patch_mat);
}
void basicOCR::getData()
{
    IplImage* src_image;
    IplImage prs_image;
    CvMat row, data;
    char file[255];
    int i, j;

    //for(i = 0; i < classes; i++)
    for (i = 32; i < 32 + classes; i++)
    {
        for ( j = 0; j < train_samples; j++)
        {
            // Load the training image for this class and sample
            /*if(j < 10)
                sprintf(file,"%s%d/%d0%d.pbm",file_path, i - 48, i - 48 , j);
            else
                sprintf(file,"%s%d/%d%d.pbm",file_path, i - 48, i - 48 , j);*/
            if (i >= 48 && i <= 57)
                sprintf(file, "%s%d/%d.pbm", file_path, i, j);
            else
                sprintf(file, "%s%d/%d.bmp", file_path, i, j);
            src_image = cvLoadImage(file, 0);
            if(!src_image)
            {
                //printf("Error: Cant load image %s\n", file);
                continue;
                //exit(-1);
            }
            // process file
            prs_image = preprocessing(src_image, size, size);

            // Set class label
            cvGetRow(trainClasses, &row, (i - 32)*train_samples + j);
            cvSet(&row, cvRealScalar(i));
            // Set data
            cvGetRow(trainData, &row, (i - 32)*train_samples + j);

            IplImage* img = cvCreateImage( cvSize( size, size ), IPL_DEPTH_32F, 1 );
            // convert 8-bit image to 32-bit float image
            cvConvertScale(&prs_image, img, 0.0039215, 0);

            cvGetSubRect(img, &data, cvRect(0, 0, size, size));

            CvMat row_header, *row1;
            // convert the size x size data matrix to a row vector
            row1 = cvReshape( &data, &row_header, 0, 1 );
            cvCopy(row1, &row, NULL);
        }
    }
}
IplImage preprocessing(IplImage* imgSrc, int new_width, int new_height)
{
    IplImage* result;
    IplImage* scaledResult;

    CvMat data;
    CvMat dataA;
    CvRect bb;  // bounding box
    CvRect bba; // bounding box maintaining aspect ratio

    // Find the bounding box
    bb = findBB(imgSrc);

    // Get the bounding box data; with aspect ratio preserved the x and y can be corrupted
    cvGetSubRect(imgSrc, &data, cvRect(bb.x, bb.y, bb.width, bb.height));

    // Create a square image (aspect ratio 1) whose side is the larger of the
    // bounding box width and height
    //printf("%d %d\n", bb.height, bb.width);
    int size = (bb.width > bb.height) ? bb.width : bb.height;
    result = cvCreateImage( cvSize( size, size ), 8, 1 );
    cvSet(result, CV_RGB(255, 255, 255), NULL);

    // Copy the data into the center of the image
    //int x=(int)floor((float)(size-bb.width)/2.0f);
    //int y=(int)floor((float)(size-bb.height)/2.0f);
    int x = bb.x;
    int y = bb.y;
    cvGetSubRect(result, &dataA, cvRect(x, y, bb.width, bb.height));
    cvCopy(&data, &dataA, NULL);

    // Scale the result
    scaledResult = cvCreateImage( cvSize( new_width, new_height ), 8, 1 );
    cvResize(result, scaledResult, CV_INTER_NN);

    // Return processed data
    return *scaledResult;
    //return result;
}
int templateMatch(struct window *window, int frame, int diam, CvMat *tmpl)
{
    // Init
    struct frame *fr = get_frame(window->frames, frame);

    // printf("Guess is (%d, %d), diameter is %d\n", window->guess.x, window->guess.y, diam);
    float init_x = (float)window->guess.x - diam, init_y = (float)window->guess.y - diam;

    // See if we can guess where the ball might be
    CvRect rect = cvRect(init_x, init_y, diam*2, diam*2);

    // Make sure rect is within the image
    rect.x = rect.x < 0 ? 0 : rect.x;
    rect.y = rect.y < 0 ? 0 : rect.y;
    rect.width  = rect.x + rect.width  > fr->image->cols ? fr->image->cols - rect.x : rect.width;
    rect.height = rect.y + rect.height > fr->image->rows ? fr->image->rows - rect.y : rect.height;

    // Get sub rect
    CvMat *sub = cvCreateMatHeader(rect.height, rect.width, CV_32F);
    cvGetSubRect(fr->image, sub, rect);

    CvMat *res = cvCreateMat(sub->rows - tmpl->rows + 1, sub->cols - tmpl->cols + 1, CV_32F);

    // Match
    cvMatchTemplate(sub, tmpl, res, CV_TM_SQDIFF);

    // Find value and location of min = upper-left corner of template match
    CvPoint pt;
    double val;
    cvMinMaxLoc(res, &val, 0, &pt, 0, 0);
    // printf("#%d: value of match is %f\n", frame, val);

    if (val > 20000000) { // Works on sample video
        // printf("Doubling search area\n");
        templateMatch(window, frame, diam*2, tmpl);
        return 0;
    }

    // Match result
    struct MatchResult mr;
    mr.x = init_x + pt.x;
    mr.y = init_y + pt.y;
    mr.found = 1;
    fr->match = mr;

    window->guess.x = mr.x;
    window->guess.y = mr.y;

    return 0;
}
void ConcatArrs( CvArr ***pppa, CvMat *dst, int rowNum, int colNum, int *colNums /*= NULL */)
{
    CvMat tmpHeader, *sub = 0;
    int m = 0, n;
    CvSize sz;

    for (int i = 0; i < rowNum; i++)
    {
        n = 0;
        for (int j = 0; j < (colNum ? colNum : colNums[i]); j++)
        {
            sz = cvGetSize(pppa[i][j]);
            sub = cvGetSubRect(dst, &tmpHeader, cvRect(n, m, sz.width, sz.height));
            cvCopy(pppa[i][j], sub);
            n += sz.width;
        }
        m += sz.height;
    }
}
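A hedged usage sketch for ConcatArrs(): tiling four equally sized blocks into a 2x2 block matrix. The pointer-of-pointers layout follows the CvArr*** parameter above; colNums is passed as NULL because every row holds the same number of blocks, and dst must already be allocated to the combined size and type.

#include <opencv/cv.h>

/* Sketch: dst = [ A B ; C D ] for four blocks of identical size and type. */
static void tile_2x2_blocks(CvMat* A, CvMat* B, CvMat* C, CvMat* D, CvMat* dst)
{
    CvArr*  row0[2] = { A, B };
    CvArr*  row1[2] = { C, D };
    CvArr** rows[2] = { row0, row1 };

    /* dst is expected to be (2 * A->rows) x (2 * A->cols). */
    ConcatArrs(rows, dst, 2, 2, NULL);
}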
IplImage *fftImageInv(IplImage *img)
{
    IplImage *ret;
    CvMat *ft;
    CvMat tmp;

    ft = cvCreateMat(img->height, img->width, CV_64FC2);
    origin_center(img);

    // copy the complex image into ft
    cvGetSubRect(ft, &tmp, cvRect(0, 0, img->width, img->height)); // tmp points to a sub-rect of ft
    cvCopy(img, &tmp, NULL);                                       // copy complex image into that sub-rect

    cvDFT(ft, ft, (CV_DXT_SCALE | CV_DXT_INVERSE), img->height);
    ret = cvMatToImage(ft);
    cvReleaseMat(&ft);
    printf("INVFFT Return image is: Depth=%d channels=%d\n", ret->depth, ret->nChannels);
    return ret;
}
int ArrayTest::prepare_test_case( int test_case_idx )
{
    int code = 1;
    size_t max_arr = test_array.size();
    vector<vector<Size> > sizes(max_arr);
    vector<vector<Size> > whole_sizes(max_arr);
    vector<vector<int> > types(max_arr);
    size_t i, j;
    RNG& rng = ts->get_rng();
    bool is_image = false;

    for( i = 0; i < max_arr; i++ )
    {
        size_t sizei = std::max(test_array[i].size(), (size_t)1);
        sizes[i].resize(sizei);
        types[i].resize(sizei);
        whole_sizes[i].resize(sizei);
    }

    get_test_array_types_and_sizes( test_case_idx, sizes, types );

    for( i = 0; i < max_arr; i++ )
    {
        size_t sizei = test_array[i].size();
        for( j = 0; j < sizei; j++ )
        {
            unsigned t = randInt(rng);
            bool create_mask = true, use_roi = false;
            CvSize size = cvSize(sizes[i][j]), whole_size = size;
            CvRect roi = CV_STRUCT_INITIALIZER;

            is_image = !cvmat_allowed ? true : iplimage_allowed ? (t & 1) != 0 : false;
            create_mask = (t & 6) == 0; // ~ each of 3 tests will use mask
            use_roi = (t & 8) != 0;
            if( use_roi )
            {
                whole_size.width += randInt(rng) % 10;
                whole_size.height += randInt(rng) % 10;
            }

            cvRelease( &test_array[i][j] );
            if( size.width > 0 && size.height > 0 &&
                types[i][j] >= 0 && (i != MASK || create_mask) )
            {
                if( use_roi )
                {
                    roi.width = size.width;
                    roi.height = size.height;
                    if( whole_size.width > size.width )
                        roi.x = randInt(rng) % (whole_size.width - size.width);
                    if( whole_size.height > size.height )
                        roi.y = randInt(rng) % (whole_size.height - size.height);
                }

                if( is_image )
                {
                    test_array[i][j] = cvCreateImage( whole_size,
                        icvTsTypeToDepth[CV_MAT_DEPTH(types[i][j])], CV_MAT_CN(types[i][j]) );
                    if( use_roi )
                        cvSetImageROI( (IplImage*)test_array[i][j], roi );
                }
                else
                {
                    test_array[i][j] = cvCreateMat( whole_size.height, whole_size.width, types[i][j] );
                    if( use_roi )
                    {
                        CvMat submat, *mat = (CvMat*)test_array[i][j];
                        cvGetSubRect( test_array[i][j], &submat, roi );
                        submat.refcount = mat->refcount;
                        *mat = submat;
                    }
                }
            }
        }
    }

    test_mat.resize(test_array.size());
    for( i = 0; i < max_arr; i++ )
    {
        size_t sizei = test_array[i].size();
        test_mat[i].resize(sizei);
        for( j = 0; j < sizei; j++ )
        {
            CvArr* arr = test_array[i][j];
            test_mat[i][j] = cv::cvarrToMat(arr);
            if( !test_mat[i][j].empty() )
                fill_array( test_case_idx, (int)i, (int)j, test_mat[i][j] );
        }
    }

    return code;
}