// Configures the train/test split of the loaded sample matrix.
// `spl` selects either an absolute training-sample count (CV_COUNT) or a
// fraction of all samples (CV_PORTION); the remaining samples become the
// test set.  Uses the legacy CV_FUNCNAME/__BEGIN__/CV_ERROR error handling.
void CvMLData::set_train_test_split( const CvTrainTestSplit * spl)
{
    CV_FUNCNAME( "CvMLData::set_division" );
    __BEGIN__;

    int sample_count = 0;

    if ( !values )
        CV_ERROR( CV_StsInternal, "data is empty" );

    sample_count = values->rows;

    float train_sample_portion;

    if (spl->train_sample_part_mode == CV_COUNT)
    {
        train_sample_count = spl->train_sample_part.count;
        if (train_sample_count > sample_count)
            CV_ERROR( CV_StsBadArg, "train samples count is not correct" );
        // A non-positive count means "use every sample for training".
        train_sample_count = train_sample_count<=0 ? sample_count : train_sample_count;
    }
    else // dtype.train_sample_part_mode == CV_PORTION
    {
        train_sample_portion = spl->train_sample_part.portion;
        if ( train_sample_portion > 1)
            CV_ERROR( CV_StsBadArg, "train samples count is not correct" );
        // Portions numerically indistinguishable from 0 or 1 mean
        // "use every sample for training".
        train_sample_portion = train_sample_portion <= FLT_EPSILON ||
            1 - train_sample_portion <= FLT_EPSILON ? 1 : train_sample_portion;
        train_sample_count = std::max(1, cvFloor( train_sample_portion * sample_count ));
    }

    // Everything is a training sample: drop any previous index split.
    if ( train_sample_count == sample_count )
    {
        free_train_test_idx();
        // NOTE(review): returns without passing through __END__ — confirm
        // this is intended with the legacy macro pair.
        return;
    }

    // A split of a different size invalidates the cached index headers.
    if ( train_sample_idx && train_sample_idx->cols != train_sample_count )
        free_train_test_idx();

    if ( !sample_idx)
    {
        // Build a single 0..sample_count-1 index array; the train and test
        // headers below are views into its two halves.
        int test_sample_count = sample_count- train_sample_count;
        sample_idx = (int*)cvAlloc( sample_count * sizeof(sample_idx[0]) );
        for (int i = 0; i < sample_count; i++ )
            sample_idx[i] = i;
        train_sample_idx = cvCreateMatHeader( 1, train_sample_count, CV_32SC1 );
        *train_sample_idx = cvMat( 1, train_sample_count, CV_32SC1, &sample_idx[0] );

        CV_Assert(test_sample_count > 0);
        test_sample_idx = cvCreateMatHeader( 1, test_sample_count, CV_32SC1 );
        *test_sample_idx = cvMat( 1, test_sample_count, CV_32SC1, &sample_idx[train_sample_count] );
    }

    mix = spl->mix;
    if ( mix )
        mix_train_and_test_idx();

    __END__;
}
// Initializes an AsefEyeLocator from an ASEF filter file: loads the two eye
// filters, allocates the working matrices, precomputes the filter DFTs,
// binds the correlation-window views and builds a log(1+i) lookup table.
// NOTE(review): allocation and load results are not checked here; see the
// int-returning variant of this function for the checked version.
void asef_initialze(AsefEyeLocator *asef, const char *file_name){
    // Load left/right eye filters plus their search rectangles.
    load_asef_filters(file_name, &asef->n_rows, &asef->n_cols, &asef->lrect,
        &asef->rrect, &asef->lfilter, &asef->rfilter);

    // Frequency-domain copies of the filters.
    asef->lfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);

    // Scratch images and correlation outputs, all filter-sized.
    asef->image = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->image_tile = cvCreateMat(asef->n_rows, asef->n_cols, CV_8UC1);
    asef->lcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);

    // Headers only: cvGetSubRect below points them into lcorr/rcorr.
    asef->lroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);

    // Filter spectra are computed once; correlation happens in the
    // frequency domain.
    cvDFT(asef->lfilter, asef->lfilter_dft, CV_DXT_FORWARD, 0);
    cvDFT(asef->rfilter, asef->rfilter_dft, CV_DXT_FORWARD, 0);

    cvGetSubRect(asef->lcorr, asef->lroi, asef->lrect);
    cvGetSubRect(asef->rcorr, asef->rroi, asef->rrect);

    // lut[i] = log(1 + i), used for intensity compression.
    asef->lut = cvCreateMat(256, 1, CV_32FC1);
    for (int i = 0; i<256; i++){
        cvmSet(asef->lut, i, 0, 1.0 + i);
    }
    cvLog(asef->lut, asef->lut);
}
// Converts `source` (BGR) to grayscale and computes its integral and
// squared-integral images, either on the CPU (cvIntegral) or on the GPU via
// the OpenCL kernels prepared in `data`.  The GPU path maps the result
// buffers and wraps them in CvMat headers without copying.
// NOTE(review): the mapped square-sum buffer holds cl_ulong values but is
// exposed as CV_64FC1 — both are 8 bytes/element, but readers must
// reinterpret; confirm downstream usage (same convention as clifIntegral).
CLIFIntegralResult clifGrayscaleIntegral(const IplImage* source, CLIFEnvironmentData* data, const cl_bool use_opencl)
{
    CLIFIntegralResult ret;
    if(!use_opencl) {
        // CPU fallback: 8-bit grayscale conversion, then cvIntegral
        // (outputs are one row/column larger than the source).
        IplImage* grayscale = cvCreateImage(cvSize(source->width, source->height), IPL_DEPTH_8U, 1);
        cvCvtColor(source, grayscale, CV_BGR2GRAY);
        ret.image = cvCreateMat(source->height + 1, source->width + 1, CV_32SC1);
        ret.square_image = cvCreateMat(source->height + 1, source->width + 1, CV_64FC1);
        cvIntegral(grayscale, ret.image, ret.square_image);
        cvReleaseImage(&grayscale);
        return ret;
    }

    cl_int error = CL_SUCCESS;

    // Upload the interleaved BGR pixels (widthStep covers row padding).
    error = clEnqueueWriteBuffer(data->environment.queue, data->bgr_to_gray_data.buffers[0], CL_FALSE, 0, source->widthStep * source->height, source->imageData, 0, NULL, NULL);
    clCheckOrExit(error);

    // Run the BGR->gray kernel.
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[0], 2, NULL, data->bgr_to_gray_data.global_size, data->bgr_to_gray_data.local_size, 0, NULL, NULL);
    clCheckOrExit(error);

    // Feed the grayscale output buffer into the integral-image kernels.
    clSetKernelArg(data->environment.kernels[1], 0, sizeof(cl_mem), &(data->bgr_to_gray_data.buffers[1]));
    clCheckOrExit(error);

    // Row prefix-sum kernel.
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[1], 1, NULL, &(data->integral_image_data.global_size[0]), &(data->integral_image_data.local_size[0]), 0, NULL, NULL);
    clCheckOrExit(error);

    // Column prefix-sum kernel.
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[2], 1, NULL, &(data->integral_image_data.global_size[1]), &(data->integral_image_data.local_size[1]), 0, NULL, NULL);
    clCheckOrExit(error);

    // Map the results (blocking) instead of copying them out.
    cl_uint* result = (cl_uint*)clEnqueueMapBuffer(data->environment.queue, data->integral_image_data.buffers[3], CL_TRUE, CL_MAP_READ, 0, (source->width + 1) * (source->height + 1) * sizeof(cl_uint), 0, NULL, NULL, &error);
    clCheckOrExit(error);
    cl_ulong* square_result = (cl_ulong*)clEnqueueMapBuffer(data->environment.queue, data->integral_image_data.buffers[4], CL_TRUE, CL_MAP_READ, 0, (source->width + 1) * (source->height + 1) * sizeof(cl_ulong), 0, NULL, NULL, &error);
    clCheckOrExit(error);
    data->integral_image_data.ptr = result;

    // Wrap the mapped buffers in CvMat headers.
    // FIX: cvSetData takes the row step in BYTES; the element count
    // (width + 1) was being passed before, producing corrupt headers.
    // This now matches the correct usage in clifIntegral.
    ret.image = cvCreateMatHeader(source->height + 1, source->width + 1, CV_32SC1);
    cvSetData(ret.image, result, (source->width + 1) * sizeof(cl_uint));
    ret.square_image = cvCreateMatHeader(source->height + 1, source->width + 1, CV_64FC1);
    cvSetData(ret.square_image, square_result, (source->width + 1) * sizeof(cl_ulong));
    return ret;
}
// Initializes an AsefEyeLocator: loads the face-detection cascade and the
// ASEF eye filters, allocates all working matrices, precomputes the filter
// DFTs, binds the correlation ROIs and builds a log(1+i) lookup table.
// Returns 0 on success, -1 on any failure (bad arguments, load error, or a
// failed allocation).
// NOTE(review): on failure, matrices allocated before the failing step are
// not released here — presumably the caller tears the struct down; confirm.
int asef_initialze(AsefEyeLocator *asef, const char *asef_file_name, const char *fd_file_name){
    // Reject null/empty arguments up front.
    if ( !asef || !asef_file_name || !fd_file_name ||
        strlen(asef_file_name)==0 || strlen(fd_file_name)==0)
        return -1;

    // For face detection:
    asef->face_detection_buffer = cvCreateMemStorage(0);
    asef->face_detection_classifier = fd_load_detector( fd_file_name );
    if ( !asef->face_detection_classifier )
        return -1;

    // For asef eye locator: non-zero return means the filter file failed to load.
    if ( load_asef_filters(asef_file_name, &asef->n_rows, &asef->n_cols, &asef->lrect,
        &asef->rrect, &asef->lfilter, &asef->rfilter) )
        return -1;

    // Working buffers sized to the filter dimensions.
    asef->lfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rfilter_dft = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->scaled_face_image_32fc1 = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->scaled_face_image_8uc1 = cvCreateMat(asef->n_rows, asef->n_cols, CV_8UC1);
    asef->lcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rcorr = cvCreateMat(asef->n_rows, asef->n_cols, CV_32FC1);
    // Headers only: cvGetSubRect below points them into lcorr/rcorr.
    asef->lroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->rroi = cvCreateMatHeader(asef->n_rows, asef->n_cols, CV_32FC1);
    asef->lut = cvCreateMat(256, 1, CV_32FC1);

    // Verify every allocation before touching the matrices.
    if ( !(asef->lfilter_dft && asef->rfilter_dft &&
        asef->scaled_face_image_32fc1 && asef->scaled_face_image_8uc1 &&
        asef->lcorr && asef->rcorr && asef->lroi && asef->rroi && asef->lut) ){
        return -1;
    }

    // Filter spectra are computed once; correlation happens in the
    // frequency domain.
    cvDFT(asef->lfilter, asef->lfilter_dft, CV_DXT_FORWARD, 0);
    cvDFT(asef->rfilter, asef->rfilter_dft, CV_DXT_FORWARD, 0);

    cvGetSubRect(asef->lcorr, asef->lroi, asef->lrect);
    cvGetSubRect(asef->rcorr, asef->rroi, asef->rrect);

    // lut[i] = log(1 + i), used for intensity compression.
    for (int i = 0; i<256; i++){
        cvmSet(asef->lut, i, 0, 1.0 + i);
    }
    cvLog(asef->lut, asef->lut);

    return 0;
}
// Constructs the calibrator node: wires up the ROS flows (image in/out, PTZ
// observe/control, keyboard), allocates the per-frame OpenCV buffers,
// loads previously saved intrinsics/distortion if present (otherwise
// allocates empty ones), and creates a timestamped directory for
// calibration snapshots.
Calibrator() : ROS_Slave()
{
    // Flow registration: sinks get callbacks, sources are published.
    register_sink(image_in = new FlowImage("image_in"), ROS_CALLBACK(Calibrator, image_received));
    codec_in = new ImageCodec<FlowImage>(image_in);
    register_source(image_out = new FlowImage("image_out"));
    codec_out = new ImageCodec<FlowImage>(image_out);
    register_sink(observe = new FlowPTZActuatorNoSub("observe"), ROS_CALLBACK(Calibrator, ptz_received));
    register_source(control = new FlowPTZActuatorNoSub("control"));
    register_sink(key = new FlowSDLKeyEvent("key"), ROS_CALLBACK(Calibrator, key_received));
    register_with_master();

    // Frame buffers (hard-coded 704x480 capture size).  The in/out headers
    // have no data of their own: pixels are attached when frames arrive.
    cvimage_in = cvCreateMatHeader(480, 704, CV_8UC3);
    cvimage_out = cvCreateMatHeader(480, 704, CV_8UC3);
    cvimage_bgr = cvCreateMat(480, 704, CV_8UC3);
    cvimage_undistort = cvCreateMat(480, 704, CV_8UC3);

    // Reuse a saved calibration when the files exist.
    if ((intrinsic_matrix = (CvMat*)cvLoad("intrinsic.dat")) == 0) {
        intrinsic_matrix = cvCreateMat( 3, 3, CV_32FC1 );
    }
    if ((distortion_coeffs = (CvMat*)cvLoad("distortion.dat")) == 0) {
        distortion_coeffs = cvCreateMat( 4, 1, CV_32FC1 );
    }
    matToScreen(intrinsic_matrix, "intrinsic");
    matToScreen(distortion_coeffs, "distortion");

    calibrated = false;
    undistort = false;
    centering = false;
    take_pic = false;
    img_cnt = 0;

    // Snapshot directory named after the current local time: MMDDYY_HHMMSS.
    time_t rawtime;
    struct tm* timeinfo;
    time(&rawtime);
    timeinfo = localtime(&rawtime);
    sprintf(dir_name, "images/%.2d%.2d%.2d_%.2d%.2d%.2d", timeinfo->tm_mon + 1,
        timeinfo->tm_mday,timeinfo->tm_year - 100,timeinfo->tm_hour,
        timeinfo->tm_min, timeinfo->tm_sec);
    if (mkdir(dir_name, 0755)) {
        std::cout << "Failed to make directory: " << dir_name;
    }

    // 12*12 corners — presumably the maximum checkerboard grid tracked
    // between frames; confirm against the detection code.
    last_corners = new CvPoint2D32f[12*12];
}
// Builds a spill tree over the rows of `raw_data` (n samples, dimension d).
// naive/rho/tau are stored as construction parameters for the recursive
// split performed by icvDFSInitSpillTreeNode.  Each sample becomes a leaf
// whose `center` is a CvMat header viewing the corresponding row of
// raw_data (no pixel copy); the leaves are first chained into a
// doubly-linked list hanging off the root, which the DFS then splits.
static CvSpillTree* icvCreateSpillTree( const CvMat* raw_data, const int naive, const double rho, const double tau )
{
    int n = raw_data->rows;
    int d = raw_data->cols;

    CvSpillTree* tr = (CvSpillTree*)cvAlloc( sizeof(CvSpillTree) );
    tr->root = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) );
    memset(tr->root, 0, sizeof(CvSpillTreeNode));
    // refmat keeps one header per sample so they can be released later.
    tr->refmat = (CvMat**)cvAlloc( sizeof(CvMat*)*n );
    tr->total = n;
    tr->naive = naive;
    tr->rho = rho;
    tr->tau = tau;
    tr->type = raw_data->type;

    // tie a link-list to the root node
    tr->root->lc = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) );
    memset(tr->root->lc, 0, sizeof(CvSpillTreeNode));
    tr->root->lc->center = cvCreateMatHeader( 1, d, tr->type );
    cvSetData( tr->root->lc->center, _dispatch_mat_ptr(raw_data, 0), raw_data->step );
    tr->refmat[0] = tr->root->lc->center;
    tr->root->lc->lc = NULL;
    tr->root->lc->leaf = true;
    tr->root->lc->i = 0;
    CvSpillTreeNode* node = tr->root->lc;
    for ( int i = 1; i < n; i++ )
    {
        CvSpillTreeNode* newnode = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) );
        memset(newnode, 0, sizeof(CvSpillTreeNode));
        newnode->center = cvCreateMatHeader( 1, d, tr->type );
        // Offset i*d elements into the data — assumes contiguous rows of
        // length d (consistent with _dispatch_mat_ptr usage above).
        cvSetData( newnode->center, _dispatch_mat_ptr(raw_data, i*d), raw_data->step );
        tr->refmat[i] = newnode->center;
        // During construction, lc/rc double as prev/next list links.
        newnode->lc = node;
        newnode->i = i;
        newnode->leaf = true;
        newnode->rc = NULL;
        node->rc = newnode;
        node = newnode;
    }
    // Root bookkeeping: lc = list head, rc = list tail, cc = sample count.
    tr->root->rc = node;
    tr->root->cc = n;
    // Recursively split the list into the actual spill tree.
    icvDFSInitSpillTreeNode( tr, d, tr->root );
    return tr;
}
TDV_NAMESPACE_BEGIN

// Pulls the next filename off the list, loads the image, converts it to a
// CvMat with R/B channels swapped and writes it to the output pipe.
// Returns whether a frame was actually written (false once the list is
// exhausted).  Throws Exception if an image fails to load.
bool ImageReader::update()
{
    WriteGuard<ReadWritePipe<CvMat*> > wg(m_wpipe);

    if ( m_cImg < m_filenames.size() )
    {
        const std::string &filename(m_filenames[m_cImg++]);
        IplImage *img = cvLoadImage(filename.c_str());

        if ( img != NULL )
        {
#if 0
            CvMat *mat = cvCreateMatHeader(img->height, img->width, CV_8UC3);
            mat = cvGetMat(img, mat);
#else
            // Deep copy with R/B swapped, so the IplImage can be freed here
            // and the consumer owns a self-contained matrix.
            CvMat *mat = cvCreateMat(img->height, img->width, CV_8UC3);
            cvConvertImage(img, mat, CV_CVTIMG_SWAP_RB);
            cvReleaseImage(&img);
#endif
            wg.write(mat);
        }
        else
        {
            throw Exception(boost::format("can't open image: %1%") % filename);
        }
    }

    return wg.wasWrite();
}
// Applies brightness/contrast to `img` in place via a 256-entry LUT.
// Amounts are clamped to [-127, 127] and remembered in the members
// brightnessVal / contrastVal.  The linear mapping y = gain*x + offset
// follows Werner D. Streidt's formula.
void ofxCvBrightnessContrast::setBrightnessAndContrast(ofxCvImage& img, float brightnessAmount, float contrastAmount){
	// Clamp and store the requested amounts.
	brightnessVal = MAX(-127, MIN(127, brightnessAmount));
	contrastVal = MAX(-127, MIN(127, contrastAmount));

	// Derive the linear mapping applied to every intensity value.
	double shift, gain, offset;
	if ( contrastVal>0 ) {
		shift  = (127.0f*contrastVal) / 128.0f;
		gain   = 255.0f / ( 255.0f-(shift*2.0f) );
		offset = gain * (brightnessVal-shift);
	}else{
		shift  = (-128.0f*contrastVal) / 128.0f;
		gain   = ( 256.0f-(shift*2.0f) ) / 255.0f;
		offset = ( gain*brightnessVal )+shift;
	}

	// Bake the mapping into a lookup table, saturating to [0, 255].
	unsigned char table[ 256 ];
	for( int v=0; v<256; v++ ) {
		int mapped = cvRound( (gain*v)+offset );
		table[v] = (unsigned char) min( max(0,mapped), 255 );
	}

	// Wrap the table in a 1x256 CvMat header and apply it in place.
	CvMat * lutHeader = cvCreateMatHeader( 1, 256, CV_8UC1 );
	cvSetData( lutHeader, table, 0 );
	cvLUT( img.getCvImage(), img.getCvImage(), lutHeader );
	cvReleaseMat( &lutHeader );
}
// Applies an averaging (box) low-pass filter of the given kernel size to
// `frame`, replacing its image with the filtered result.  The kernel size
// is clamped to 9.
// NOTE(review): imgAux->widthStep is overwritten with the image *width* and
// imgDst->widthStep with imgAux->width — that only equals OpenCV's real row
// stride for single-channel images with no row padding; confirm upstream.
// NOTE(review): the `filter` header from cvCreateMatHeader is never
// released — confirm whether this leak is acceptable here.
void Filters::lowPass(VRFrame* frame, int size)
{
    IplImage* imgDst = 0;
    IplImage* imgAux = 0;
    IplImage* imgNew = 0;
    VRFrame* frameAux;

    Log::writeLog("%s :: param: frame[%x] size[%d]", __FUNCTION__, frame, size);

    // Clamp the kernel size.
    if (size > 9)
        size = 9;

    int cols_i = size;
    int rows_i = size;
    int total_size = 0;
    CvMat *filter = 0;

    total_size=(int)pow((double)size,2);

    // Convolution mask (box kernel: every coefficient equal).
    ///double convMask[total_size];
    double * convMask = new double[total_size];

    // Create images with the same parameters as the original frame.
    frameAux = new VRFrame(frame);

    imgDst = VRFrame::imgAlloc(frameAux);
    imgAux = VRFrame::imgAlloc(frameAux);
    imgNew = VRFrame::imgAlloc(frameAux);

    // Fill the mask: each coefficient is 1/total_size.
    for (int i=0; i<total_size; i++)
        convMask[i] = (double)1/(double)total_size;

    imgAux->imageData = frameAux->data->imageData;
    imgAux->widthStep = frameAux->data->width;

    imgDst->imageData = imgAux->imageData;
    imgDst->widthStep = imgAux->width;

    // Wrap the mask in a CvMat header; step = cols * sizeof(double).
    filter = cvCreateMatHeader(rows_i, cols_i, CV_64FC1);
    cvSetData(filter, convMask, cols_i*8);

    cvFilter2D(imgAux, imgDst, filter, cvPoint(-1,-1));

    VRFrame::imgCopy(imgDst, imgNew);

    frame->setImage(imgNew);

    // Free the temporaries.
    VRFrame::imgDealloc(imgAux);
    VRFrame::imgDealloc(imgDst);
    delete[] convMask;
    delete frameAux;
}
//-------------------------------------------------------------- ofImage ofxContrast::setBrightness(ofImage& _img, float brightnessAmount){ ofxCvColorImage cvimg; cvimg.allocate(_img.width, _img.height); cvimg.setFromPixels(_img.getPixels(), _img.width, _img.height); float brightnessVal = MAX(-127, MIN(127, brightnessAmount)); unsigned char data[ 256 ]; CvMat * matrix; matrix = cvCreateMatHeader( 1, 256, CV_8UC1 ); cvSetData( matrix, data, 0 ); for( int i=0; i<256; i++ ) { int value = cvRound( i+brightnessVal ); data[i] = (unsigned char) min( max(0,value), 255 ); } cvLUT( cvimg.getCvImage(), cvimg.getCvImage(), matrix ); cvReleaseMat( &matrix ); ofImage ofimg; ofimg.allocate(_img.width, _img.height, OF_IMAGE_COLOR); ofimg.setFromPixels(cvimg.getPixels(), _img.width, _img.height, OF_IMAGE_COLOR); return ofimg; }
// Demonstrates the four ways to create a CvMat in the legacy OpenCV C API.
int main(int argc, char* argv[])
{
    // Method 1: create header and data block in a single call.
    CvMat* pmat1;
    pmat1 = cvCreateMat(8, 9, CV_32FC1);

    // Method 2: create the header first, then allocate the data block.
    CvMat* pmat2;
    pmat2 = cvCreateMatHeader(4, 5, CV_8UC1);
    cvCreateData(pmat2);

    // Method 3: build a stack-allocated matrix around existing data.
    float data[4] = { 3, 4, 6, 0 };
    CvMat pmat3;
    cvInitMatHeader(&pmat3, 2, 2, CV_32FC1, data);

    // Method 4: clone an existing matrix (deep copy).
    CvMat* pmat4;
    pmat4 = cvCloneMat(pmat2);

    // Inspect the matrix attributes.
    test(pmat2);

    // Release the heap-allocated matrices (pmat3 lives on the stack and
    // borrows `data`, so it must not be released).
    cvReleaseMat(&pmat1);
    cvReleaseMat(&pmat2);
    cvReleaseMat(&pmat4);

    return 0;
}
// GLUT mouse handler: on button press, runs a Hough transform around the
// click to locate the ball, cuts a template view around the detected
// circle, stores it as the tracking template, seeds a first template match
// and derives the meters-per-pixel scale from the known ball diameter
// (argv[2]).
void event_mouse(int button, int state, int x, int y)
{
    int err;

    if (state == GLUT_DOWN) { // Mouse down = find template
        // Do hough transform to find ball center and radius
        struct frame *fr = get_frame(active_window->frames, active_window->cur);
        err = houghTransform(active_window, active_window->cur, x, y);
        if (err) {
            return;
        }

        fr->flag |= HAS_HOUGH;
        active_window->guess.x = fr->hough.x;
        active_window->guess.y = fr->hough.y;

        // Make a subimage containing the template
        CvRect r = cvRect(fr->hough.x-fr->hough.radius, fr->hough.y-fr->hough.radius,
            fr->hough.radius*2, fr->hough.radius*2);
        // Header only: cvGetSubRect points it at the frame's pixels, so the
        // template stays valid only as long as fr->image does.
        CvMat *sub = cvCreateMatHeader(fr->hough.radius*2, fr->hough.radius*2, CV_32FC1);
        cvGetSubRect(fr->image, sub, r);
        active_window->tmpl = sub;

        // Match (could be left out)
        templateMatch(active_window, active_window->cur, MARGIN, sub);
        fr->flag |= HAS_MATCH;
        glutPostRedisplay();

        // Calculate meters per pixel
        // NOTE(review): "%f" expects a double — confirm hough.radius is a
        // floating-point field, otherwise this printf is undefined behavior.
        active_window->mpp = atof(active_window->argv[2])/(fr->hough.radius*2);
        printf("Getting mpp: %f/%f = %f\n", atof(active_window->argv[2]),
            fr->hough.radius*2, active_window->mpp);
    }
}
void Classifier::optical_flow(const IplImage *frame, double *xD, double *yD) { double xDiff = 0; double yDiff = 0; //double xQDiff = 0; //double yQDiff = 0; if (prevFrame) { /* Optical flow for entire image */ CvSize img_sz = cvGetSize(frame); IplImage *imgA = cvCreateImage(img_sz, IPL_DEPTH_8U, 1); IplImage *imgB = cvCreateImage(img_sz, IPL_DEPTH_8U, 1); cvCvtColor(frame, imgA, CV_BGR2GRAY); cvCvtColor(prevFrame, imgB, CV_BGR2GRAY); CvMat* velx = cvCreateMatHeader( img_sz.height, img_sz.width, CV_32FC1 ); cvCreateData( velx ); CvMat* vely = cvCreateMatHeader( img_sz.height, img_sz.width, CV_32FC1 ); cvCreateData( vely ); cvCalcOpticalFlowLK( imgA, imgB, cvSize(15, 15), velx, vely ); xDiff = cvAvg(velx).val[0]; yDiff = cvAvg(vely).val[0]; *xD = xDiff; *yD = yDiff; } // if else { prevFrame = cvCreateImage ( cvGetSize(frame), frame->depth, frame->nChannels ); } // else cvCopy(frame, prevFrame); }
// Computes the Earth Mover's Distance between two 1-D float signatures
// using the caller-supplied len1 x len2 cost matrix (CV_DIST_USER).
// The CvMat wrappers are headers only — they borrow the caller's buffers,
// so releasing them frees just the headers.
// FIX: removed the unused local `lb` (a lower bound is not passed to
// cvCalcEMD2; its lower_bound argument is NULL).
float emd_float(float *sig1, int len1, float *sig2, int len2, float *costmat)
{
    CvMat *s1 = cvCreateMatHeader(len1, 1,    CV_32F),
          *s2 = cvCreateMatHeader(len2, 1,    CV_32F),
          *c  = cvCreateMatHeader(len1, len2, CV_32F);
    s1->data.fl = sig1;
    s2->data.fl = sig2;
    c->data.fl  = costmat;

    // User-defined metric, no distance callback, no flow output,
    // no lower bound, no userdata.
    float ret = cvCalcEMD2(s1, s2, CV_DIST_USER, NULL, c, NULL, NULL, NULL);

    cvReleaseMat(&s1);
    cvReleaseMat(&s2);
    cvReleaseMat(&c);
    return ret;
}
// Adjusts brightness/contrast of `src` in place (the pointer is replaced by
// the adjusted clone).  Brightness/Contrast are in percent; the mapping
// y = a*x + b follows Werner D. Streidt's formula and is applied per
// channel via a shared 256-entry lookup table.
void adjustBrightnessContrast(IplImage *&src, int Brightness, int Contrast)
{
    unsigned char LookupTableData[256];
    CvMat *LookupTableMatrix;
    double Delta;
    double a, b;
    int y;

    // Split into planes so one LUT pass handles each channel.
    IplImage *filterB = cvCreateImage(cvGetSize(src), (src)->depth, 1);
    IplImage *filterG = cvCreateImage(cvGetSize(src), (src)->depth, 1);
    IplImage *filterR = cvCreateImage(cvGetSize(src), (src)->depth, 1);
    cvSplit(src, filterB, filterG, filterR, 0);

    //Brightness/Contrast Formula
    // FIX: compute Delta in floating point — the original evaluated
    // 127 * Contrast / 100 (and -128 * Contrast / 100) in int, truncating
    // the fraction before assigning to the double.
    if(Contrast > 0)
    {
        Delta = 127.0 * Contrast / 100.0;
        a = 255.0 / (255.0 - Delta * 2);
        b = a * (Brightness - Delta);
    }
    else
    {
        Delta = -128.0 * Contrast / 100.0;
        a = (256.0 - Delta * 2) / 255.0;
        b = a * Brightness + Delta;
    }

    // Bake the mapping into the LUT, saturating to [0, 255].
    for(int x = 0 ; x < 256 ; x++)
    {
        y=(int)(a * x + b);
        if(y < 0)
            y = 0;
        else if(y > 255)
            y = 255;
        LookupTableData[x]=(uchar)y;
    }

    LookupTableMatrix = cvCreateMatHeader(1, 256, CV_8UC1);
    cvSetData(LookupTableMatrix, LookupTableData, 0);

    cvLUT(filterB, filterB, LookupTableMatrix);
    cvLUT(filterG, filterG, LookupTableMatrix);
    cvLUT(filterR, filterR, LookupTableMatrix);

    // Recombine the planes and swap the result into `src`.
    IplImage *dst = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
    cvMerge(filterB, filterG, filterR, 0, dst);

    cvReleaseImage(&src);
    src = cvCloneImage(dst);

    cvReleaseImage(&dst);
    cvReleaseImage(&filterB);
    cvReleaseImage(&filterG);
    cvReleaseImage(&filterR);
    cvReleaseMat(&LookupTableMatrix);
}//end Brightness/Contrast
int ASEF_Algorithm::initialize() { if (load_asef_filters(haar_cascade_path, &n_rows, &n_cols, &lrect, &rrect, &lfilter, &rfilter)) return -1; lfilter_dft = cvCreateMat(n_rows, n_cols, CV_32FC1); rfilter_dft = cvCreateMat(n_rows, n_cols, CV_32FC1); scaled_face_image_32fc1 = cvCreateMat(n_rows, n_cols, CV_32FC1); scaled_face_image_8uc1 = cvCreateMat(n_rows, n_cols, CV_8UC1); lcorr = cvCreateMat(n_rows, n_cols, CV_32FC1); rcorr = cvCreateMat(n_rows, n_cols, CV_32FC1); lroi = cvCreateMatHeader(n_rows, n_cols, CV_32FC1); rroi = cvCreateMatHeader(n_rows, n_cols, CV_32FC1); lut = cvCreateMat(256, 1, CV_32FC1); point = cvCreateMat(1, 2, CV_32FC1); if (!(lfilter_dft && rfilter_dft && scaled_face_image_32fc1 && scaled_face_image_8uc1 && lcorr && rcorr && lroi && rroi && lut)) { return -1; } cvDFT(lfilter, lfilter_dft, CV_DXT_FORWARD, 0); cvDFT(rfilter, rfilter_dft, CV_DXT_FORWARD, 0); cvGetSubRect(lcorr, lroi, lrect); cvGetSubRect(rcorr, rroi, rrect); for (int i = 0; i < 256; i++) { cvmSet(lut, i, 0, 1.0 + i); } cvLog(lut, lut); isInitialized = true; return 0; }
// Histogram-equalizes a single-channel 8-bit image in place and returns it.
// Prints a message and returns NULL if the image is not single-channel.
// NOTE(review): ((nn[i]<<8)-nn[i]) overflows int for images with more than
// ~8.4M pixels — confirm expected image sizes.
IplImage* onHistEZ(IplImage* gray){
    if(gray->nChannels!=1){
        printf("不是单通道图像\n"); /* "not a single-channel image" */
        return NULL;
    }

    int width=gray->width;
    int height=gray->height;
    int ihist[256];   // intensity histogram
    int nn[256];      // cumulative (accumulated) histogram
    uchar T[256];     // equalization lookup table
    CvMat *T_mat;
    int i=0,val=0;
    // FIX: count real pixels only.  The original summed widthStep*height
    // and walked imageData linearly, so row-padding bytes were binned into
    // the histogram and inflated the total.
    int sum=width*height;
    memset(ihist,0,sizeof(ihist));

    // Histogram: walk each row via widthStep so padding is skipped.
    for(int r=0;r<height;r++){
        const uchar* row=(const uchar*)(gray->imageData + r*gray->widthStep);
        for(int c=0;c<width;c++)
            ihist[row[c]]++;
    }

    // Build the cumulative distribution.
    for(i=0;i<256;i++){
        val=val+ihist[i];
        nn[i]=val;
    }

    // Normalize to [0,255]: (nn[i]<<8)-nn[i] == nn[i]*255.
    for(i=0;i<256;i++){
        T[i]=(uchar)(((nn[i]<<8)-nn[i])/sum);
    }

    // FIX: the original created this header twice, leaking the first.
    T_mat=cvCreateMatHeader(1,256,CV_8UC1);
    cvSetData(T_mat,T,0);        // attach the stack-allocated table
    cvLUT(gray,gray,T_mat);      // apply the mapping in place
    cvReleaseMat(&T_mat);        // FIX: release the header (data is on the stack)

    return gray;
}
int templateMatch(struct window *window, int frame, int diam, CvMat *tmpl) { // Init struct frame *fr = get_frame(window->frames, frame); // printf("Guess is (%d, %d), diameter is %d\n", window->guess.x, window->guess.y, diam); float init_x = (float)window->guess.x-diam, init_y = (float)window->guess.y-diam; // See if we can guess were the ball might be CvRect rect = cvRect(init_x, init_y, diam*2, diam*2); // Make sure rect is with image rect.x = rect.x < 0 ? 0 : rect.x; rect.y = rect.y < 0 ? 0 : rect.y; rect.width = rect.x+rect.width > fr->image->cols ? fr->image->cols-rect.x : rect.width; rect.height = rect.y+rect.height > fr->image->rows ? fr->image->rows-rect.y : rect.height; // Get sub rect CvMat *sub = cvCreateMatHeader(rect.height, rect.width, CV_32F); cvGetSubRect(fr->image, sub, rect); CvMat *res = cvCreateMat(sub->rows - tmpl->rows+1, sub->cols - tmpl->cols+1, CV_32F); // Match cvMatchTemplate(sub, tmpl, res, CV_TM_SQDIFF); // Find value and location of min = upper-left corner of template match CvPoint pt; double val; cvMinMaxLoc(res, &val, 0, &pt, 0, 0); // printf("#%d: value of match is %f\n", frame, val); if (val > 20000000) { // Works on sample video // printf("Doubling search area\n"); templateMatch(window, frame, diam*2, tmpl); return 0; } // Match result struct MatchResult mr; mr.x = init_x+pt.x; mr.y = init_y+pt.y; mr.found = 1; fr->match = mr; window->guess.x = mr.x; window->guess.y = mr.y; return 0; }
// Computes the integral and squared-integral images of `source`, either on
// the CPU (cvIntegral) or on the GPU via the OpenCL kernels prepared in
// `data`.  The GPU path maps the result buffers and wraps them in CvMat
// headers without copying.
// NOTE(review): the mapped square-sum buffer holds cl_ulong values but is
// exposed as CV_64FC1 — both are 8 bytes/element, but readers must
// reinterpret; confirm downstream usage.
CLIFIntegralResult clifIntegral(const IplImage* source, CLIFEnvironmentData* data, const cl_bool use_opencl)
{
    CLIFIntegralResult ret;
    if(!use_opencl) {
        // CPU path: cvIntegral outputs are one row/column larger.
        ret.image = cvCreateMat(source->height + 1, source->width + 1, CV_32SC1);
        ret.square_image = cvCreateMat(source->height + 1, source->width + 1, CV_64FC1);
        cvIntegral(source, ret.image, ret.square_image);
        return ret;
    }

    cl_int error = CL_SUCCESS;

    // FIX: upload the pixel data, not the IplImage struct — the original
    // passed `source` (the header pointer) as the host pointer.
    // NOTE(review): the width*height byte count assumes a single-channel
    // image with no row padding (widthStep == width); confirm against the
    // kernel's expectations.
    error = clEnqueueWriteBuffer(data->environment.queue, data->integral_image_data.buffers[0], CL_FALSE, 0, source->width * source->height, source->imageData, 0, NULL, NULL);
    clCheckOrExit(error);

    // Row prefix-sum kernel.
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[1], 1, NULL, &(data->integral_image_data.global_size[0]), &(data->integral_image_data.local_size[0]), 0, NULL, NULL);
    clCheckOrExit(error);

    // Column prefix-sum kernel.
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[2], 1, NULL, &(data->integral_image_data.global_size[1]), &(data->integral_image_data.local_size[1]), 0, NULL, NULL);
    clCheckOrExit(error);

    // Map the results (blocking) instead of copying them out.
    cl_uint* result = (cl_uint*)clEnqueueMapBuffer(data->environment.queue, data->integral_image_data.buffers[3], CL_TRUE, CL_MAP_READ, 0, (source->width + 1) * (source->height + 1) * sizeof(cl_uint), 0, NULL, NULL, &error);
    clCheckOrExit(error);
    cl_ulong* square_result = (cl_ulong*)clEnqueueMapBuffer(data->environment.queue, data->integral_image_data.buffers[4], CL_TRUE, CL_MAP_READ, 0, (source->width + 1) * (source->height + 1) * sizeof(cl_ulong), 0, NULL, NULL, &error);
    clCheckOrExit(error);
    data->integral_image_data.ptr = result;

    // Wrap the mapped buffers in CvMat headers (step in bytes).
    ret.image = cvCreateMat(source->height + 1, source->width + 1, CV_32SC1);
    cvSetData(ret.image, result, (source->width + 1) * sizeof(cl_uint));
    ret.square_image = cvCreateMatHeader(source->height + 1, source->width + 1, CV_64FC1);
    cvSetData(ret.square_image, square_result, (source->width + 1) * sizeof(cl_ulong));
    return ret;
}
// create CvMat and underlying date CV_IMPL CvMat* cvCreateMat( int height, int width, int type ) { CvMat* arr = 0; CV_FUNCNAME( "cvCreateMat" ); __BEGIN__; CV_CALL( arr = cvCreateMatHeader( height, width, type )); CV_CALL( cvCreateData( arr )); __END__; if( cvGetErrStatus() < 0 ) cvReleaseMat( &arr ); return arr; }
// Histogram equalization: computes the source's intensity histogram,
// builds the cumulative-distribution lookup table and applies it to `dst`
// with cvLUT.  `src` and `dst` are assumed to be 8-bit single-channel
// images of the same size.
void HistgramEqualization(IplImage* src,IplImage* dst)
{
    CvHistogram *hist = 0;

    // FIX: "const HDIM=256" relied on implicit int, which is invalid C++.
    const int HDIM = 256;
    int n = HDIM;
    double nn[HDIM];
    uchar T[HDIM];
    CvMat *T_mat;

    int x;
    int sum = 0; // sum of pixels of the source image
    double val = 0;

    // calculate histogram
    hist = cvCreateHist( 1, &n, CV_HIST_ARRAY, 0, 1 );
    cvCalcHist( &src, hist, 0, 0 );

    // Create Accumulative Distribute Function of histgram
    val = 0;
    for ( x = 0; x < n; x++)
    {
        val = val + cvGetReal1D (hist->bins, x);
        nn[x] = val;
    }

    // Compute the discrete intensity transformation
    sum = src->height * src->width;
    for( x = 0; x < n; x++ )
    {
        T[x] = (uchar) (255 * nn[x] / sum); // range is [0,255]
    }

    // Do intensity transform for source image
    cvCopyImage(src, dst);
    T_mat = cvCreateMatHeader( 1, 256, CV_8UC1 );
    cvSetData( T_mat, T, 0 );

    // Apply the lookup table in a single pass.
    cvLUT( src, dst, T_mat );
    cvReleaseHist ( &hist );
    // FIX: the T_mat header was leaked (its data lives on the stack, so
    // releasing frees just the header).
    cvReleaseMat( &T_mat );
}
//-------------------------------------------------------------- ofImage ofxContrast::setBrightnessAndContrast(ofImage& _img, float brightnessAmount, float contrastAmount){ ofxCvColorImage cvimg; cvimg.allocate(_img.width, _img.height); cvimg.setFromPixels(_img.getPixels(), _img.width, _img.height); float brightnessVal = MAX(-127, MIN(127, brightnessAmount)); float contrastVal = MAX(-127, MIN(127, contrastAmount)); unsigned char data[ 256 ]; CvMat * matrix; double delta, a, b; matrix = cvCreateMatHeader( 1, 256, CV_8UC1 ); cvSetData( matrix, data, 0 ); if ( contrastVal>0 ) { delta = (127.0f*contrastVal) / 128.0f; a = 255.0f / ( 255.0f-(delta*2.0f) ); b = a * (brightnessVal-delta); } else { delta = (-128.0f*contrastVal) / 128.0f; a = ( 256.0f-(delta*2.0f) ) / 255.0f; b = ( a*brightnessVal )+delta; } for( int i=0; i<256; i++ ) { int value = cvRound( (a*i)+b ); data[i] = (unsigned char) min( max(0,value), 255 ); } cvLUT( cvimg.getCvImage(), cvimg.getCvImage(), matrix ); cvReleaseMat( &matrix ); ofImage ofimg; ofimg.allocate(_img.width, _img.height, OF_IMAGE_COLOR); ofimg.setFromPixels(cvimg.getPixels(), _img.width, _img.height, OF_IMAGE_COLOR); return ofimg; }
// Deep-copies `src`: clones the header and, if the source has pixel data
// attached, allocates a new buffer and copies the contents into it.
CV_IMPL CvMat* cvCloneMat( const CvMat* src )
{
    CvMat* dst = 0;

    CV_FUNCNAME( "cvCloneMat" );

    __BEGIN__;

    if( !_CV_IS_ARR( src ))
        CV_ERROR( CV_StsBadArg, "Bad CvMat header" );

    CV_CALL( dst = cvCreateMatHeader( src->height, src->width, src->type ));

    // Headers without data (views not yet bound) clone to headers only.
    if( src->data.ptr )
    {
        CV_CALL( cvCreateData( dst ));
        CV_CALL( cvCopy( src, dst ));
    }

    __END__;

    return dst;
}
// Returns the response column of the loaded data as a single-column
// CV_32FC1 view (no copy), or 0 when no valid response column is set.
// The header is cached in `response_out` and reused across calls.
const CvMat* CvMLData::get_responses()
{
    CV_FUNCNAME( "CvMLData::get_responses_ptr" );
    __BEGIN__;

    int var_count = 0;

    if ( !values )
        CV_ERROR( CV_StsInternal, "data is empty" );
    var_count = values->cols;

    if ( response_idx < 0 || response_idx >= var_count )
        // NOTE(review): early return skips __END__ — confirm this is
        // intended with the legacy macro pair.
        return 0;

    // Allocate the cached header once; re-init it on later calls.
    if ( !response_out )
        response_out = cvCreateMatHeader( values->rows, 1, CV_32FC1 );
    else
        cvInitMatHeader( response_out, values->rows, 1, CV_32FC1);
    // Point the header at the response column of `values`.
    cvGetCol( values, response_out, response_idx );

    __END__;

    return response_out;
}
// Decodes an in-memory image byte buffer, converts it to grayscale, runs
// the koki marker detector on it, prints each detected marker's code and
// shows the source image until a key is pressed.
// NOTE(review): `markers` and its koki_marker_t elements are not freed
// here — confirm ownership against the koki API.
void process_image( char* buffer, int len )
{
    GPtrArray *markers;
    int i;

    // Wrap the raw bytes in a len x 1 CvMat header so cvDecodeImage can
    // read them (step 1 byte per element).
    CvMat *m = cvCreateMatHeader (len, 1, CV_8UC1);
    cvSetData (m, buffer, 1);

    IplImage *color = cvDecodeImage (m, CV_LOAD_IMAGE_COLOR);
    assert (color != NULL);

    IplImage *bw = cvCreateImage(cvGetSize(color), color->depth, 1);
    cvCvtColor (color, bw, CV_RGB2GRAY);

    // Camera intrinsics: principal point at the image centre, fixed focal
    // length of 571 (units per the koki convention).
    koki_camera_params_t params;
    params.size.x = bw->width;
    params.size.y = bw->height;
    params.principal_point.x = params.size.x / 2;
    params.principal_point.y = params.size.y / 2;
    params.focal_length.x = 571.0;
    params.focal_length.y = 571.0;

    // 0.11 is the marker size — presumably metres; confirm with koki docs.
    markers = koki_find_markers(koki, bw, 0.11, &params);
    assert(markers != NULL);

    for (i=0; i<markers->len; i++){
        koki_marker_t *marker;
        marker = g_ptr_array_index(markers, i);
        printf("\n(%d) Marker #%d:\n", i, marker->code);
    }

    cvRelease ((void**)&bw);

    cvShowImage ("Source", color);
    cvWaitKey (0);

    cvRelease ((void**)&color);
    cvRelease ((void**)&m);
}
// A Simple Camera Capture Framework int main() { CvCapture* capture = cvCaptureFromCAM( 0 ); if( !capture ) { fprintf( stderr, "ERROR: capture is NULL \n" ); return -1; } #ifdef HALF_SIZE_CAPTURE cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 352/2); cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 288/2); #endif // Create a window in which the captured images will be presented cvNamedWindow( "Source Image Window", CV_WINDOW_AUTOSIZE ); cvNamedWindow( "Back Projected Image", CV_WINDOW_AUTOSIZE ); cvNamedWindow( "Brightness and Contrast Window", CV_WINDOW_AUTOSIZE ); cvNamedWindow( "Blob Output Window", CV_WINDOW_AUTOSIZE ); cvNamedWindow( "Histogram Window", 0); cvNamedWindow( "Rainbow Window", CV_WINDOW_AUTOSIZE ); // Capture one frame to get image attributes: source_frame = cvQueryFrame( capture ); if( !source_frame ) { fprintf( stderr, "ERROR: frame is null...\n" ); return -1; } cvCreateTrackbar("histogram\nnormalization", "Back Projected Image", &normalization_sum, 6000, NULL); cvCreateTrackbar("brightness", "Brightness and Contrast Window", &_brightness, 200, NULL); cvCreateTrackbar("contrast", "Brightness and Contrast Window", &_contrast, 200, NULL); cvCreateTrackbar("threshold", "Blob Output Window", &blob_extraction_threshold, 255, NULL); cvCreateTrackbar("min blob size", "Blob Output Window", &min_blob_size, 2000, NULL); cvCreateTrackbar("max blob size", "Blob Output Window", &max_blob_size, source_frame->width*source_frame->height/4, NULL); inputImage = cvCreateImage(cvGetSize(source_frame), IPL_DEPTH_8U, 1); histAdjustedImage = cvCreateImage(cvGetSize(source_frame), IPL_DEPTH_8U, 1); outputImage = cvCreateImage(cvGetSize(source_frame), IPL_DEPTH_8U, 3 ); hist_image = cvCreateImage(cvSize(320,200), 8, 1); rainbowImage = cvCreateImage(cvGetSize(source_frame), IPL_DEPTH_8U, 3 ); // object that will contain blobs of inputImage CBlobResult blobs; CBlob my_enumerated_blob; cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale, 
vScale, 0, lineWidth); // Some brightness/contrast stuff: bright_cont_image = cvCloneImage(inputImage); lut_mat = cvCreateMatHeader( 1, 256, CV_8UC1 ); cvSetData( lut_mat, lut, 0 ); while( 1 ) { // Get one frame source_frame = cvQueryFrame( capture ); if( !source_frame ) { fprintf( stderr, "ERROR: frame is null...\n" ); getchar(); break; } cvShowImage( "Source Image Window", source_frame ); // Do not release the frame! cvCvtColor(source_frame, inputImage, CV_RGB2GRAY); // Histogram Stuff! my_hist = cvCreateHist(1, hist_size_array, CV_HIST_ARRAY, ranges, 1); cvCalcHist( &inputImage, my_hist, 0, NULL ); cvNormalizeHist(my_hist, normalization_sum); // NOTE: First argument MUST have an ampersand, or a segmentation fault will result cvCalcBackProject(&inputImage, histAdjustedImage, my_hist); // Histogram Picture int bin_w; float max_value = 0; cvGetMinMaxHistValue( my_hist, 0, &max_value, 0, 0 ); cvScale( my_hist->bins, my_hist->bins, ((double)hist_image->height)/max_value, 0 ); cvSet( hist_image, cvScalarAll(255), 0 ); bin_w = cvRound((double)hist_image->width/hist_size); for(int i = 0; i < hist_size; i++ ) cvRectangle( hist_image, cvPoint(i*bin_w, hist_image->height), cvPoint((i+1)*bin_w, hist_image->height - cvRound(cvGetReal1D(my_hist->bins,i))), cvScalarAll(0), -1, 8, 0 ); cvShowImage( "Histogram Window", hist_image ); cvShowImage("Back Projected Image", histAdjustedImage); // Brightness/contrast loop stuff: int brightness = _brightness - 100; int contrast = _contrast - 100; /* * The algorithm is by Werner D. Streidt * (http://visca.com/ffactory/archives/5-99/msg00021.html) */ if( contrast > 0 ) { double delta = 127.*contrast/100; double a = 255./(255. 
- delta*2); double b = a*(brightness - delta); for(int i = 0; i < 256; i++ ) { int v = cvRound(a*i + b); if( v < 0 ) v = 0; if( v > 255 ) v = 255; lut[i] = (uchar)v; } } else { double delta = -128.*contrast/100; double a = (256.-delta*2)/255.; double b = a*brightness + delta; for(int i = 0; i < 256; i++ ) { int v = cvRound(a*i + b); if( v < 0 ) v = 0; if( v > 255 ) v = 255; lut[i] = (uchar)v; } } cvLUT( inputImage, bright_cont_image, lut_mat ); cvShowImage( "Brightness and Contrast Window", bright_cont_image); // --------------- // Blob Manipulation Code begins here: // Extract the blobs using a threshold of 100 in the image blobs = CBlobResult( bright_cont_image, NULL, blob_extraction_threshold, true ); // discard the blobs with less area than 5000 pixels // ( the criteria to filter can be any class derived from COperadorBlob ) blobs.Filter( blobs, B_INCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, min_blob_size); blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER, max_blob_size); // build an output image equal to the input but with 3 channels (to draw the coloured blobs) cvMerge( bright_cont_image, bright_cont_image, bright_cont_image, NULL, outputImage ); // plot the selected blobs in a output image for (int i=0; i < blobs.GetNumBlobs(); i++) { blobs.GetNthBlob( CBlobGetArea(), i, my_enumerated_blob ); // Color 5/6 of the color wheel (300 degrees) my_enumerated_blob.FillBlob( outputImage, cv_hsv2rgb((float)i/blobs.GetNumBlobs() * 300, 1, 1)); } // END Blob Manipulation Code // --------------- sprintf(str, "Count: %d", blobs.GetNumBlobs()); cvPutText(outputImage, str, cvPoint(50, 25), &font, cvScalar(255,0,255)); cvShowImage("Blob Output Window", outputImage); /* // Rainbow manipulation: for (int i=0; i < CV_CAP_PROP_FRAME_WIDTH; i++) { for (int j=0; j < CV_CAP_PROP_FRAME_HEIGHT; j++) { // This line is not figure out yet... 
// pixel_color_set = ((uchar*)(rainbowImage->imageData + rainbowImage->widthStep * j))[i * 3] ((uchar*)(rainbowImage->imageData + rainbowImage->widthStep * j))[i * 3] = 30; ((uchar*)(rainbowImage->imageData + rainbowImage->widthStep * j))[i * 3 + 1] = 30; ((uchar*)(rainbowImage->imageData + rainbowImage->widthStep * j))[i * 3 + 2] = 30; } } cvShowImage("Rainbow Window", rainbowImage); */ //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version), //remove higher bits using AND operator if( (cvWaitKey(10) & 255) == 27 ) break; } cvReleaseImage(&inputImage); cvReleaseImage(&histAdjustedImage); cvReleaseImage(&hist_image); cvReleaseImage(&bright_cont_image); cvReleaseImage(&outputImage); cvReleaseImage(&rainbowImage); // Release the capture device housekeeping cvReleaseCapture( &capture ); cvDestroyAllWindows(); return 0; }
void mexFunction( int nargout, mxArray *out[], int nargin, const mxArray *in[] ) { /* declare variables */ double *mx_r_in; double *mx_r_out; int output_dim = 3; /* check arguments */ if (nargin != 1 || nargout > 1){ mexErrMsgTxt("Wrong Number of arguments."); exit(1); } // Link input vars to pointers in C mx_r_in = mxGetPr(in[0]); int m = mxGetM(in[0]); int n = mxGetN(in[0]); // Input is a rotation matrix if (m == 3 && n == 3){ output_dim = 1; } // Check input argument: avoid errors if (!((m == 3 && n == 3) || (m == 1 && n == 3) || (m == 3 && n == 1))){ mexPrintf("HELP! ERROR! %d %d\n", m, n); exit(1); } // Create OpenCV array for input variable // If we want to use cvSetData, our matrices are actually the transposed // versions of those that come from Matlab. CvMat *r_in_T = cvCreateMatHeader(m, n, CV_64F); cvSetData (r_in_T, mx_r_in, sizeof(double)*n); // Transpose the matrix CvMat *r_in = cvCreateMat(n, m, CV_64F); cvT(r_in_T, r_in); // Result CvMat *r_out_T = cvCreateMat(output_dim, 3, CV_64F); // Call cvRodrigues cvRodrigues2(r_in, r_out_T); // Allocate memory for the output var out[0] = mxCreateNumericMatrix(3, output_dim, mxDOUBLE_CLASS, mxREAL); mx_r_out = mxGetPr(out[0]); CvMat* r_out = cvCreateMatHeader(3, output_dim, CV_64F); cvSetData (r_out, mx_r_out, sizeof(double)*output_dim); cvT(r_out_T, r_out); // Free all array headers and return cvReleaseMat(&r_in); cvReleaseMatHeader(&r_in_T); cvReleaseMatHeader(&r_out); }
CvArr * PyArray_to_CvArr (PyObject * obj)
{
    // Build a temporary CvMat header over the buffer described by obj's
    // __array_interface__ (numpy array-interface protocol, version 3).
    // The returned CvMat does NOT own the data: it is only valid while obj
    // keeps its buffer alive.  Returns NULL (with a Python exception set)
    // on any protocol violation.
    CvArr * cvarr = NULL;
    void * raw_data = 0;

    long rows         = 0;
    long cols         = 0;
    long channels     = 0;
    long step         = 0;
    long mat_type     = 7;   // overwritten below; CV_USRTYPE1 later marks failure
    long element_size = 1;

    // BUG FIX: PyObject_GetAttrString returns NULL (with AttributeError set)
    // when the attribute is missing.  The original code passed that NULL
    // straight into PyMapping_Check and Py_DECREF, both of which crash on
    // NULL, so any object without __array_interface__ segfaulted.
    PyObject * interface = PyObject_GetAttrString (obj, "__array_interface__");

    // the array interface should be a dict
    if (interface && PyMapping_Check (interface))
    {
        if (PyMapping_HasKeyString (interface, (char*)"version") &&
            PyMapping_HasKeyString (interface, (char*)"shape")   &&
            PyMapping_HasKeyString (interface, (char*)"typestr") &&
            PyMapping_HasKeyString (interface, (char*)"data"))
        {
            PyObject * version = PyMapping_GetItemString (interface, (char*)"version");
            PyObject * shape   = PyMapping_GetItemString (interface, (char*)"shape");
            PyObject * typestr = PyMapping_GetItemString (interface, (char*)"typestr");
            PyObject * data    = PyMapping_GetItemString (interface, (char*)"data");

            if (!PyInt_Check (version) || PyInt_AsLong (version) != 3)
                PyErr_SetString(PyExc_TypeError, "OpenCV understands version 3 of the __array_interface__ only");
            else if (!PyTuple_Check (shape) || PyTuple_Size (shape) < 2 || PyTuple_Size (shape) > 3)
                PyErr_SetString(PyExc_TypeError, "arrays must have a shape with 2 or 3 dimensions");
            else
            {
                // shape is (rows, cols[, channels]); missing third dim means 1 channel
                rows     = PyInt_AsLong (PyTuple_GetItem (shape, 0));
                cols     = PyInt_AsLong (PyTuple_GetItem (shape, 1));
                channels = PyTuple_Size (shape) < 3 ? 1 : PyInt_AsLong (PyTuple_GetItem (shape, 2));

                if (rows < 1 || cols < 1 || channels < 1 || channels > 4)
                    PyErr_SetString(PyExc_TypeError, "rows and columns must be positive, channels from 1 to 4");
                // data must be (pointer, readonly_flag) with readonly == False
                else if (!PyTuple_Check (data) || PyTuple_Size (data) != 2 ||
                         !(PyInt_Check (PyTuple_GetItem (data, 0)) || PyLong_Check (PyTuple_GetItem (data, 0))) ||
                         !(PyBool_Check (PyTuple_GetItem (data, 1)) && !PyInt_AsLong (PyTuple_GetItem (data, 1))))
                    PyErr_SetString (PyExc_TypeError, "arrays must have a pointer to writeable data");
                else
                {
                    raw_data = PyLong_AsVoidPtr (PyTuple_GetItem (data, 0));

                    char *     format_str = NULL;
                    Py_ssize_t len        = 0;
                    // typestr is e.g. "<u1", "<f8": byte order, kind, item size
                    if (!PyString_Check (typestr) ||
                        PyString_AsStringAndSize (typestr, &format_str, &len) == -1 ||
                        len != 3)
                        PyErr_SetString(PyExc_TypeError, "there is something wrong with the format string");
                    else
                    {
                        // map (kind, itemsize) onto an OpenCV element type
                        if      (format_str[1] == 'u' && format_str[2] == '1') { element_size = 1; mat_type = CV_MAKETYPE(CV_8U,  channels); }
                        else if (format_str[1] == 'i' && format_str[2] == '1') { element_size = 1; mat_type = CV_MAKETYPE(CV_8S,  channels); }
                        else if (format_str[1] == 'u' && format_str[2] == '2') { element_size = 2; mat_type = CV_MAKETYPE(CV_16U, channels); }
                        else if (format_str[1] == 'i' && format_str[2] == '2') { element_size = 2; mat_type = CV_MAKETYPE(CV_16S, channels); }
                        else if (format_str[1] == 'i' && format_str[2] == '4') { element_size = 4; mat_type = CV_MAKETYPE(CV_32S, channels); }
                        else if (format_str[1] == 'f' && format_str[2] == '4') { element_size = 4; mat_type = CV_MAKETYPE(CV_32F, channels); }
                        else if (format_str[1] == 'f' && format_str[2] == '8') { element_size = 8; mat_type = CV_MAKETYPE(CV_64F, channels); }
                        else
                        {
                            PyErr_SetString(PyExc_TypeError, "unknown or unhandled element format");
                            mat_type = CV_USRTYPE1;   // sentinel: do not build a header
                        }

                        // assume a dense row-major layout
                        // TODO: implement stride handling
                        step = cols * channels * element_size;
                        if (PyMapping_HasKeyString (interface, (char*)"strides"))
                        {
                            PyObject * strides = PyMapping_GetItemString (interface, (char*)"strides");
                            if (strides != Py_None)
                            {
                                fprintf(stderr, "we have strides ... not handled!\n");
                                fflush (stderr);
                                PyErr_SetString(PyExc_TypeError, "arrays with strides not handled yet");
                                mat_type = CV_USRTYPE1;   // use this to denote, we've got an error
                            }
                            Py_DECREF (strides);
                        }

                        // create matrix header if everything is okay
                        if (mat_type != CV_USRTYPE1)
                        {
                            CvMat * temp_matrix = cvCreateMatHeader (rows, cols, mat_type);
                            cvSetData (temp_matrix, raw_data, step);
                            cvarr = temp_matrix;
                        }
                    }
                }
            }

            Py_DECREF (data);
            Py_DECREF (typestr);
            Py_DECREF (shape);
            Py_DECREF (version);
        }
    }

    Py_XDECREF (interface);   // BUG FIX: was Py_DECREF on a possibly-NULL pointer
    return cvarr;
}
void defense::ImageToEllipseList(IplImage* TheInput, int PlaneNumber)
{
    // Slice the grey-level input into PlaneNumber intensity bands, fit an
    // ellipse to every sufficiently large contour found in each band, and
    // store the ellipses in TheTargetsEllipses ordered by the priority queue.
    priority_queue<TheEllipse, vector<TheEllipse>, less<vector<TheEllipse>::value_type> > EllipQueue;
    TheTargetsEllipses.clear();

    CvMemStorage* G_storage = cvCreateMemStorage(0);
    CvSeq*        contour   = 0;
    IplImage*     Maska     = cvCreateImage(cvGetSize(TheInput), IPL_DEPTH_8U, 1);
    int           TotalEllip = 0;   // capped at MaxEllip across all bands

    for (int k = 0; k < PlaneNumber; k++) {
        // Band [(k-1)*255/N, k*255/N].
        // NOTE(review): the top of the range, ((N-1)*255/N, 255], is never
        // scanned by any band — confirm whether that is intended.
        cvInRangeS(TheInput,
                   cvScalarAll((k - 1) * 255 / (float)PlaneNumber),
                   cvScalarAll(k * 255 / (float)PlaneNumber),
                   Maska);
        cvSmooth(Maska, Maska, CV_MEDIAN, 3);   // despeckle the binary mask

        // (return value of cvFindContours was stored in an unused local before)
        cvFindContours(Maska, G_storage, &contour, sizeof(CvContour),
                       CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_L1);

        for (; contour != 0; contour = contour->h_next) {
            // cvFitEllipse2 needs a reasonable number of points
            if ((contour->total > 10) && (TotalEllip < MaxEllip)) {
                // Copy the integer contour points into a float point buffer
                CvPoint2D32f* OtroArray = new CvPoint2D32f[contour->total];
                for (int q = 0; q < contour->total; q++) {
                    CvPoint* p = (CvPoint*)cvGetSeqElem(contour, q);
                    OtroArray[q].x = (float)p->x;
                    OtroArray[q].y = (float)p->y;
                }

                // Wrap the buffer in a CvMat header (no data copy)
                CvMat* CountArray = cvCreateMatHeader(contour->total, 1, CV_32FC2);
                cvInitMatHeader(CountArray, contour->total, 1, CV_32FC2, OtroArray);

                // calculating the best ellipse
                CvBox2D Ellipdesc = cvFitEllipse2(CountArray);
                EllipQueue.push(TheEllipse(Ellipdesc.center.x,
                                           Ellipdesc.center.y,
                                           Ellipdesc.size.width,
                                           Ellipdesc.size.height,
                                           Ellipdesc.angle,
                                           k * 255 / PlaneNumber));
                TotalEllip++;

                delete [] OtroArray;
                cvReleaseMat(&CountArray);
            }
        }
    }

    // Drain the queue in priority order into the member list
    while (!EllipQueue.empty()) {
        TheTargetsEllipses.push_back(EllipQueue.top());
        EllipQueue.pop();
    }

    // Release the mask and the contour storage.  (The old cvClearSeq(contour)
    // call was dead code: the contour loop only terminates with contour == 0.)
    cvReleaseImage(&Maska);
    if (G_storage != NULL) {
        cvReleaseMemStorage(&G_storage);
    }
}
void BagOfFeatures::trainSVM_CV(int type, int kernel, double degree, double gamma, double coef0, double C, double nu, double p, int termType, int iterations, double eps, char* fileName) { int i, j, k, l = -1; int totalData = 0; int size, length = dictionary->rows; float *dPtr; //Get the total number of training data for(i = 0; i < numClasses; i++) totalData += data[i].getTrainSize(); //CvMat* trainData = cvCreateMat(totalData, dictionary->rows, CV_32FC1); //CvMat* dataLabel = cvCreateMat(totalData, 1, CV_32FC1); float** trainData = new float* [totalData]; float* dataLabel = new float [totalData]; for(i = 0; i < totalData; i++) trainData[i] = new float [dictionary->rows]; // For each class for(i = 0; i < numClasses; i++) { // Get the number of images size = data[i].getTrainSize(); for(j = 0; j < size; j++) { l++; //Attach the label to it //dataLabel->data.fl[l] = (float)data[i].getLabel(); //dPtr = (float*)(trainData->data.ptr + l*trainData->step); dataLabel[l] = (float)data[i].getLabel(); // Copy the histograms for(k = 0; k < length; k++) { //dPtr[k] = trainObject[i].histogramSet.histogram[j][k]; trainData[l][k] = trainObject[i].histogramSet.histogram[j][k]; } } } CvSVMParams SVMParam_CV; SVMParam_CV.svm_type = type; SVMParam_CV.kernel_type = kernel; SVMParam_CV.degree = degree; SVMParam_CV.gamma = gamma; SVMParam_CV.coef0 = coef0; SVMParam_CV.C = C; SVMParam_CV.nu = nu; SVMParam_CV.p = p; SVMParam_CV.class_weights = NULL; SVMParam_CV.term_crit = cvTermCriteria(termType, iterations, eps); CvMat *dataHeader = cvCreateMatHeader(totalData, dictionary->rows, CV_32FC1); CvMat *labelHeader = cvCreateMatHeader(totalData, 1, CV_32FC1); cvInitMatHeader(dataHeader, totalData, dictionary->rows, CV_32FC1, trainData); cvInitMatHeader(labelHeader, totalData, 1, CV_32FC1, dataLabel); //Train the SVM //CvSVM svm(trainData, dataLabel, 0, 0, // CvSVMParams(CvSVM::C_SVC, CvSVM::LINEAR, 0, 0, 0, 2, // 0, 0, 0, cvTermCriteria(CV_TERMCRIT_EPS,0, 0.01))); //strcpy(classifierFile, 
fileName); //if(SVMModel_CV != NULL) // delete SVMModel_CV; SVMModel_CV.clear(); SVMModel_CV.train_auto(dataHeader, labelHeader, 0, 0, SVMParam_CV, 10); SVMModel_CV.save(classifierFile); cvReleaseMatHeader(&dataHeader); cvReleaseMatHeader(&labelHeader); for(i = 0; i < totalData; i++) delete [] trainData[i]; delete [] trainData; delete [] dataLabel; classifierType = CVSVM_CLASSIFIER; }