/* Count dark lines seen by the camera. extract(), dy(), count() and comb() are
   helper routines assumed to be defined elsewhere in this project. */
int main()
{
    IplImage* img;
    CvCapture* cap = cvCaptureFromCAM(0);
    cvNamedWindow("Line Counter", 1);

    CvFont font1, font2;
    cvInitFont(&font1, CV_FONT_HERSHEY_SIMPLEX, 0.5f, 1.0f, 0, 3, 8);
    cvInitFont(&font2, CV_FONT_HERSHEY_SIMPLEX, 0.5f, 1.0f, 0, 2, 8);

    int val = 0, axx = 0, bxx = 0;
    char text[16];

    for (;;) {
        img = cvQueryFrame(cap);   /* frame is owned by the capture, do not release it */
        if (!img) break;

        IplImage* gray1 = cvCreateImage(cvSize(img->width, img->height), IPL_DEPTH_8U, 1);
        IplImage* edge1 = cvCreateImage(cvSize(img->width, 16), IPL_DEPTH_8U, 1);
        cvCvtColor(img, gray1, CV_RGB2GRAY);

        extract(gray1, edge1);
        dy(edge1, edge1);
        cvThreshold(edge1, edge1, 10, 255, CV_THRESH_BINARY_INV);

        /* simple state machine on the edge count: bxx goes up on a 0 -> 2 transition */
        val = count(edge1);
        if (val == 0 && axx == 0) { axx = 1; }
        if (val == 2 && axx == 1) { axx = 0; bxx++; }

        sprintf(text, "%i", bxx);
        comb(gray1, edge1);
        cvPutText(gray1, text, cvPoint(10, 160), &font1, cvScalarAll(255));
        cvPutText(gray1, text, cvPoint(10, 160), &font2, cvScalarAll(0));
        cvShowImage("Line Counter", gray1);

        int key = cvWaitKey(5);
        cvReleaseImage(&gray1);
        cvReleaseImage(&edge1);
        if (key > 0) break;
    }

    cvReleaseCapture(&cap);
    cvDestroyWindow("Line Counter");
    return 0;
}
IplImage *ocv_histogram1(IplImage *image)
{
    if (!image) { present(1, "!image"); return NULL; }

    unsigned char *src = (unsigned char *)image->imageData;
    unsigned int width = image->width;
    unsigned int height = image->height;
    unsigned int widthStep = image->widthStep;

    double frequencies[256];
    data_histogram(frequencies, src, width, height, widthStep);  /* external helper */

    IplImage *image2 = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
    cvSet(image2, cvScalarAll(0), NULL);

    double spacing = (double)width / 256;
    /* draw one horizontal segment per bin; i < 256 so the last bin is drawn as well */
    for (int i = 0; i < 256; i++) {
        cvLine(image2,
               cvPoint((int)(i * spacing),       (int)(height * (1 - frequencies[i]))),
               cvPoint((int)((i + 1) * spacing), (int)(height * (1 - frequencies[i]))),
               cvScalarAll(255), 1, 8, 0);
    }
    return image2;
}
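/* Hedged usage sketch (an assumption, not part of the original source): load a
   grayscale image from a placeholder path, draw its histogram with ocv_histogram1()
   and show both windows. */
void histogramExample(void)
{
    IplImage *gray = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE); /* placeholder path */
    if (!gray) return;

    IplImage *hist = ocv_histogram1(gray);
    cvNamedWindow("image", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("histogram", CV_WINDOW_AUTOSIZE);
    cvShowImage("image", gray);
    cvShowImage("histogram", hist);
    cvWaitKey(0);

    cvReleaseImage(&hist);
    cvReleaseImage(&gray);
}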
static void projectImg(IplImage *src, int64_t TRANS_X, int64_t TRANS_Y, IplImage *dst, CvMat *tmatrix)
{
    if (tmatrix->rows == 2) {
        /* 2x3 affine matrix: copy it and add the extra translation to the last column */
        CvMat* result = cvCreateMat(2, 3, CV_32FC1);
        cvSetReal2D(result, 0, 0, cvGetReal2D(tmatrix, 0, 0));
        cvSetReal2D(result, 0, 1, cvGetReal2D(tmatrix, 0, 1));
        cvSetReal2D(result, 1, 0, cvGetReal2D(tmatrix, 1, 0));
        cvSetReal2D(result, 1, 1, cvGetReal2D(tmatrix, 1, 1));
        cvSetReal2D(result, 0, 2, cvGetReal2D(tmatrix, 0, 2) + TRANS_X);
        cvSetReal2D(result, 1, 2, cvGetReal2D(tmatrix, 1, 2) + TRANS_Y);
        cvWarpAffine(src, dst, result, CV_INTER_LINEAR, cvScalarAll(0));
        cvReleaseMat(&result);
    } else if (tmatrix->rows == 3) {
        /* 3x3 homography: build a translation matrix ... */
        CvMat* offset = cvCreateMat(3, 3, CV_32FC1);
        cvSetReal2D(offset, 0, 0, 1); cvSetReal2D(offset, 0, 1, 0); cvSetReal2D(offset, 0, 2, TRANS_X);
        cvSetReal2D(offset, 1, 0, 0); cvSetReal2D(offset, 1, 1, 1); cvSetReal2D(offset, 1, 2, TRANS_Y);
        cvSetReal2D(offset, 2, 0, 0); cvSetReal2D(offset, 2, 1, 0); cvSetReal2D(offset, 2, 2, 1);

        /* ... and compose it with the input transform before warping */
        CvMat* result = cvCreateMat(3, 3, CV_32FC1);
        cvMatMul(offset, tmatrix, result);
        cvWarpPerspective(src, dst, result, CV_INTER_LINEAR, cvScalarAll(0));
        cvReleaseMat(&offset);
        cvReleaseMat(&result);
    }
}
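/* Hedged usage sketch (an assumption, not from the original project): rotate an image
   about its centre with cv2DRotationMatrix() and shift the result 40 px right and
   20 px down through projectImg(); the 2-row matrix takes the affine branch above. */
void projectImgExample(IplImage* src, IplImage* dst)
{
    CvMat* rot = cvCreateMat(2, 3, CV_32FC1);
    cv2DRotationMatrix(cvPoint2D32f(src->width * 0.5f, src->height * 0.5f), 30.0, 1.0, rot);
    projectImg(src, 40, 20, dst, rot);
    cvReleaseMat(&rot);
}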
int CV_CalcHistTest::prepare_test_case( int test_case_idx )
{
    int code = CV_BaseHistTest::prepare_test_case( test_case_idx );

    if( code > 0 )
    {
        CvRNG* rng = ts->get_rng();
        int i;

        for( i = 0; i <= CV_MAX_DIM; i++ )
        {
            if( i < cdims )
            {
                int nch = 1; //cvTsRandInt(rng) % 3 + 1;
                images[i] = cvCreateImage( img_size,
                    img_type == CV_8U ? IPL_DEPTH_8U : IPL_DEPTH_32F, nch );
                channels[i] = cvTsRandInt(rng) % nch;
                cvRandArr( rng, images[i], CV_RAND_UNI,
                    cvScalarAll(low), cvScalarAll(high) );
            }
            else if( i == CV_MAX_DIM && cvTsRandInt(rng) % 2 )
            {
                // create mask
                images[i] = cvCreateImage( img_size, IPL_DEPTH_8U, 1 );
                // make ~25% pixels in the mask non-zero
                cvRandArr( rng, images[i], CV_RAND_UNI, cvScalarAll(-2), cvScalarAll(2) );
            }
        }
    }

    return code;
}
void on_mouse(int event, int x, int y, int flags, void*)
{
    /* window name "学生証スキャナー" = "student ID card scanner" */
    if(!img)
        return;

    if(event == CV_EVENT_LBUTTONUP){
        pt = cvPoint(x, y);
        if(prev_pt.x < 0)
            prev_pt = pt;
        cvRectangle(img, prev_pt, pt, cvScalarAll(255), 2, 8, 0);
        cvShowImage("学生証スキャナー", img);
    }
    else if(event == CV_EVENT_LBUTTONDOWN){
        cvCopy(img0, tmp);
        cvCopy(img0, img);
        cvShowImage("学生証スキャナー", tmp);
        prev_pt = cvPoint(x, y); //set the start point
    }
    else if(event == CV_EVENT_MOUSEMOVE && (flags == CV_EVENT_FLAG_LBUTTON)){
        pt = cvPoint(x, y);
        if(prev_pt.x < 0)
            prev_pt = pt;
        cvCopy(img, tmp);
        cvRectangle(tmp, prev_pt, pt, cvScalarAll(255), 2, CV_AA, 0);
        cvShowImage("学生証スキャナー", tmp);
    }
}
void CV_BaseHistTest::init_hist( int /*test_case_idx*/, int hist_i )
{
    if( gen_random_hist )
    {
        CvRNG* rng = ts->get_rng();
        CvArr* h = hist[hist_i]->bins;

        if( hist_type == CV_HIST_ARRAY )
        {
            cvRandArr( rng, h, CV_RAND_UNI,
                cvScalarAll(0), cvScalarAll(gen_hist_max_val) );
        }
        else
        {
            int i, j, total_size = 1, nz_count;
            int idx[CV_MAX_DIM];
            for( i = 0; i < cdims; i++ )
                total_size *= dims[i];

            nz_count = cvTsRandInt(rng) % MAX( total_size/4, 100 );
            nz_count = MIN( nz_count, total_size );

            // a zero number of non-zero elements should be allowed
            for( i = 0; i < nz_count; i++ )
            {
                for( j = 0; j < cdims; j++ )
                    idx[j] = cvTsRandInt(rng) % dims[j];
                cvSetRealND( h, idx, cvTsRandReal(rng)*gen_hist_max_val );
            }
        }
    }
}
void thinImage(IplImage *source, IplImage *destination)
{
    for (int pass = 0; pass < 6; pass++) { //cleanup
        /* mark deletable black pixels */
        for (int i = 1; i < source->height - 2; i++) {
            for (int j = 1; j < source->width - 2; j++) {
                if (cvGet2D(destination, i, j).val[0] == BLACK_PIXEL) {
                    if (firstCondition(destination, i, j) &&
                        secondCondition(destination, i, j) &&
                        thirdCondition(destination, i, j) &&
                        fourthCondition(destination, i, j) &&
                        fifthCondition(destination, i, j) &&
                        sixthCondition(destination, i, j)) {
                        cvSet2D(destination, i, j, cvScalarAll(DELETED_PIXEL));
                    }
                }
            }
        }
        /* turn the marked pixels white */
        for (int i = 0; i < source->height - 5; i++) {
            for (int j = 0; j < source->width - 5; j++) {
                if (cvGet2D(destination, i, j).val[0] == DELETED_PIXEL) {
                    cvSet2D(destination, i, j, cvScalarAll(WHITE_PIXEL));
                }
            }
        }
    }
}
void flood(IplImage *img) { CvPoint seed=cvPoint(g,h); CvScalar color=CV_RGB(250,0,0); cvFloodFill(img,seed,color,cvScalarAll(200.0),cvScalarAll(200.0),NULL,CV_FLOODFILL_FIXED_RANGE,NULL); printf("ab %d %d\n",g,h); }
int CV_CalcBackProjectTest::prepare_test_case( int test_case_idx )
{
    int code = CV_BaseHistTest::prepare_test_case( test_case_idx );

    if( code > 0 )
    {
        CvRNG* rng = ts->get_rng();
        int i, j, n, img_len = img_size.width*img_size.height;

        for( i = 0; i < CV_MAX_DIM + 3; i++ )
        {
            if( i < cdims )
            {
                int nch = 1; //cvTsRandInt(rng) % 3 + 1;
                images[i] = cvCreateImage( img_size,
                    img_type == CV_8U ? IPL_DEPTH_8U : IPL_DEPTH_32F, nch );
                channels[i] = cvTsRandInt(rng) % nch;
                cvRandArr( rng, images[i], CV_RAND_UNI,
                    cvScalarAll(low), cvScalarAll(high) );
            }
            else if( i == CV_MAX_DIM && cvTsRandInt(rng) % 2 )
            {
                // create mask
                images[i] = cvCreateImage( img_size, IPL_DEPTH_8U, 1 );
                // make ~25% pixels in the mask non-zero
                cvRandArr( rng, images[i], CV_RAND_UNI, cvScalarAll(-2), cvScalarAll(2) );
            }
            else if( i > CV_MAX_DIM )
            {
                images[i] = cvCreateImage( img_size, images[0]->depth, 1 );
            }
        }

        cvTsCalcHist( images, hist[0], images[CV_MAX_DIM], channels );

        // now modify the images a bit so that some zeros go into the backprojection
        n = cvTsRandInt(rng) % (img_len/20+1);
        for( i = 0; i < cdims; i++ )
        {
            char* data = images[i]->imageData;
            for( j = 0; j < n; j++ )
            {
                int idx = cvTsRandInt(rng) % img_len;
                double val = cvTsRandReal(rng)*(high - low) + low;

                if( img_type == CV_8U )
                    ((uchar*)data)[idx] = (uchar)cvRound(val);
                else
                    ((float*)data)[idx] = (float)val;
            }
        }
    }

    return code;
}
void CV_MHIGlobalOrientTest::get_minmax_bounds( int i, int j, int type, CvScalar* low, CvScalar* high ) { CV_MHIBaseTest::get_minmax_bounds( i, j, type, low, high ); if( i == INPUT && j == 2 ) { *low = cvScalarAll(min_angle); *high = cvScalarAll(max_angle); } }
int main (int argc, char **argv)
{
    int width = 960, height = 640;
    IplImage *img = 0;
    double c, f;
    f = cvGetTickFrequency() * 1000;
    int cx = width / 2;
    int cy = height / 2;
    double radius = 100;
    double angle = 0;
    CvScalar color = cvScalarAll(255);
    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.5, 1.0, 1.0, 1, CV_AA);
    cvNamedWindow ("hexagon", CV_WINDOW_AUTOSIZE);

    while (1) {
        // (1) allocate and initialize an image
        img = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
        if (img == 0) return -1;
        cvZero(img);

        // (2) draw the hexagon
        c = cvGetTickCount();
        myHexagon(img, cx, cy, radius, angle, color);
        printf("%fms\n", (cvGetTickCount() - c) / f);

        // (3) show the image and wait for a key press
        cvPutText(img, "Coordinate Right(D) Left(A) Up(W) Down(X)", cvPoint(10, 20), &font, cvScalarAll(255));
        cvPutText(img, "Rotate Right(R) Left(E)", cvPoint(10, 40), &font, cvScalarAll(255));
        cvPutText(img, "Radius Big(V) Small(C)", cvPoint(10, 60), &font, cvScalarAll(255));
        cvPutText(img, "Quit(Q, esc)", cvPoint(10, 80), &font, cvScalarAll(255));
        char s[64];
        sprintf(s, "cx:%d cy:%d radius:%f angle:%f", cx, cy, radius, angle);
        cvPutText(img, s, cvPoint(10, 110), &font, cvScalarAll(255));
        cvShowImage ("hexagon", img);

        char key = cvWaitKey (0);
        if (key == 27 || key == 'q') break;
        else if (key == 'r') angle += 5;
        else if (key == 'e') angle -= 5;
        else if (key == 'a') cx -= 5;
        else if (key == 'd') cx += 5;
        else if (key == 'w') cy -= 5;
        else if (key == 'x') cy += 5;
        else if (key == 'v') radius += 5;
        else if (key == 'c') radius -= 5;

        // release this frame's image before allocating a new one
        cvReleaseImage(&img);
    }

    cvDestroyWindow("hexagon");
    cvReleaseImage(&img);
    return 0;
}
void ofxCvWatershed::segment()
{
    reset();

    int nContours = cvFindContours( iplMarkersTempImg, storage, &contours, sizeof(CvContour),
                                    CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
    int i, j, compCount = 0;

    cvZero( iplMarkers32sImg );
    for( ; contours != 0; contours = contours->h_next, compCount++ )
    {
        cvDrawContours( iplMarkers32sImg, contours, cvScalarAll(compCount+1),
                        cvScalarAll(compCount+1), -1, -1, 8, cvPoint(0,0) );
    }

    CvRNG rng = cvRNG(-1);
    colors = cvCreateMat( 1, compCount, CV_8UC3 );
    for( i = 0; i < compCount; i++ )
    {
        uchar* ptr = colors->data.ptr + i*3;
        // no colors for now.
        ptr[0] = (uchar)0;//(cvRandInt(&rng)%180 + 50);
        ptr[1] = (uchar)0;//(cvRandInt(&rng)%180 + 50);
        ptr[2] = (uchar)0;//(cvRandInt(&rng)%180 + 50);
    }

    cvWatershed( iplTargetImg, iplMarkers32sImg );

    // paint the watershed image
    for( i = 0; i < iplMarkers32sImg->height; i++ )
    {
        for( j = 0; j < iplMarkers32sImg->width; j++ )
        {
            int idx = CV_IMAGE_ELEM( iplMarkers32sImg, int, i, j );
            uchar* dst = &CV_IMAGE_ELEM( iplTargetImg, uchar, i, j*3 );
            if( idx == -1 ) {
                dst[0] = dst[1] = dst[2] = (uchar)255;
            } else if( idx <= 0 || idx > compCount ) {
                dst[0] = dst[1] = dst[2] = (uchar)0; // should not get here
            } else {
                uchar* ptr = colors->data.ptr + (idx-1)*3;
                dst[0] = ptr[0];
                dst[1] = ptr[1];
                dst[2] = ptr[2];
            }
        }
    }

    //cvAddWeighted( watershed, 0.5, colorImg.getCvImage(), 0.5, 0, watershed );
    watershedImg = iplTargetImg;
    watershedGrayImg = watershedImg;
    watershedGrayImg.threshold(140);
    //watershedGrayImg.invert();

    printf("contours %i\n", contourFinder.findContours( watershedGrayImg, 10,
        (watershedImg.width * watershedImg.height) / 2.f, 20, true));
}
void setup(CvSize size)
{
    BLACK1D = cvCreateImage(size, IPL_DEPTH_8U, 1);
    cvSet(BLACK1D, cvScalarAll(0), NULL);
    GRAY1D = cvCreateImage(size, IPL_DEPTH_8U, 1);
    cvSet(GRAY1D, cvScalarAll(127), NULL);
    WHITE1D = cvCreateImage(size, IPL_DEPTH_8U, 1);
    cvSet(WHITE1D, cvScalarAll(255), NULL);
}
int catcierge_haar_matcher_find_prey(catcierge_haar_matcher_t *ctx,
        IplImage *img, IplImage *thr_img,
        match_result_t *result, int save_steps)
{
    catcierge_haar_matcher_args_t *args;
    IplImage *thr_img2 = NULL;
    CvSeq *contours = NULL;
    size_t contour_count = 0;
    assert(ctx);
    assert(img);
    assert(ctx->args);
    args = ctx->args;

    // thr_img is modified by FindContours so we clone it first.
    thr_img2 = cvCloneImage(thr_img);

    cvFindContours(thr_img, ctx->storage, &contours,
        sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));

    // If we get more than 1 contour we count it as a prey. At least something
    // is intersecting the white area to split up the image.
    contour_count = catcierge_haar_matcher_count_contours(ctx, contours);

    // If we don't find any prey
    if ((args->prey_steps >= 2) && (contour_count == 1))
    {
        IplImage *erod_img = NULL;
        IplImage *open_img = NULL;
        CvSeq *contours2 = NULL;

        erod_img = cvCreateImage(cvGetSize(thr_img2), 8, 1);
        cvErode(thr_img2, erod_img, ctx->kernel3x3, 3);
        if (ctx->super.debug) cvShowImage("haar eroded img", erod_img);

        open_img = cvCreateImage(cvGetSize(thr_img2), 8, 1);
        cvMorphologyEx(erod_img, open_img, NULL, ctx->kernel5x1, CV_MOP_OPEN, 1);
        if (ctx->super.debug) cvShowImage("haar opened img", erod_img);

        cvFindContours(erod_img, ctx->storage, &contours2,
            sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));

        cvReleaseImage(&erod_img);
        cvReleaseImage(&open_img);

        contour_count = catcierge_haar_matcher_count_contours(ctx, contours2);
    }

    if (ctx->super.debug)
    {
        cvDrawContours(img, contours, cvScalarAll(0), cvScalarAll(0), 1, 1, 8, cvPoint(0, 0));
        cvShowImage("Haar Contours", img);
    }

    cvReleaseImage(&thr_img2);

    return (contour_count > 1);
}
void graficarHistograma(IplImage *dst, size_t binsCount, size_t *bins)
{
    static CvScalar hist_color = cvScalarAll(255);
    size_t hist_size = 256;
    //cvSet(ImagenHistorial, cvScalarAll(0), 0);

    // Act according to the number of channels in the image
    if (dst->nChannels == 1) {
        size_t max_value = 0;
        for (size_t i = 0; i < binsCount * hist_size; i++) {
            max_value = (bins[i] > max_value) ? bins[i] : max_value;
        }
        for (size_t i = 0; i < binsCount * hist_size; i++) {
            bins[i] /= max_value;
        }
        float w_scale = ((float)dst->width) / hist_size;

        // Draw onto the image
        for (int i = 0; i < hist_size; i++) {
            cvLine(dst,
                   cvPoint(binsCount * hist_size + (int)(i * w_scale), dst->height - bins[i]),
                   cvPoint(binsCount * hist_size + (int)((i + 1) * w_scale), dst->height - bins[i]),
                   hist_color, 2, 8, 0);
        }
        //printf("Scale bw: %4.2f pixels per 100 units\r", max_value * 100 / ((float)ImagenHistorial->height));
    } else if (dst->nChannels == 3) {
        IplImage *channelA = cvCreateImage(cvGetSize(dst), IPL_DEPTH_8U, 1);
        IplImage *channelB = cvCreateImage(cvGetSize(dst), IPL_DEPTH_8U, 1);
        IplImage *channelC = cvCreateImage(cvGetSize(dst), IPL_DEPTH_8U, 1);
        cvSplit(dst, channelA, channelB, channelC, NULL);

        size_t mybins[256];
        size_t max_value = 0;
        for (size_t i = 0; i < hist_size; i++) {
            max_value = (bins[i] > max_value) ? bins[i] : max_value;
        }
        for (size_t i = 0; i < binsCount * hist_size; i++) {
            bins[i] /= max_value;
        }

        hist_color = cvScalar(255, 0, 0);
        graficarHistograma(channelA, binsCount, bins);
        hist_color = cvScalar(0, 255, 0);
        graficarHistograma(channelB, binsCount, bins);
        hist_color = cvScalar(0, 0, 255);
        graficarHistograma(channelC, binsCount, bins);
        hist_color = cvScalarAll(255);

        cvReleaseImage(&channelA);
        cvReleaseImage(&channelB);
        cvReleaseImage(&channelC);
    }
}
/**
 * Paint all contours with a single OpenCV call on an image.
 */
void test_cvDrawContours( IplImage *img, CvSeq* contours)
{
    IplImage* image_all_contours = cvCreateImage(cvGetSize(img), 8, 1);
    cvCopy(img, image_all_contours, NULL);

    // CvSeq* contour = contours; // first contour
    // TODO need for loop to iterate through sequence
    cvDrawContours( image_all_contours, contours, cvScalarAll(255), cvScalarAll(0),
                    0, CV_FILLED, 8, cvPoint(0,0));

    cvShowImage( "All contours", image_all_contours);
    cvReleaseImage(&image_all_contours);
}
//=========================================
CvKalman* initKalman(CvKalman* kalman)
//=========================================
{
    /* constant-velocity model: state = (x, y, vx, vy), measurement = (x, y) */
    const float A[] = {1, 0, 1, 0,
                       0, 1, 0, 1,
                       0, 0, 1, 0,
                       0, 0, 0, 1};

    kalman = cvCreateKalman(4, 2, 0);
    memcpy(kalman->transition_matrix->data.fl, A, sizeof(A));        // A
    cvSetIdentity(kalman->measurement_matrix, cvScalarAll(1));       // H
    cvSetIdentity(kalman->process_noise_cov, cvScalarAll(1e-5));     // Q (process noise w)
    cvSetIdentity(kalman->measurement_noise_cov, cvScalarAll(1e-1)); // R (measurement noise v)
    cvSetIdentity(kalman->error_cov_post, cvScalarAll(1));           // P (posterior error covariance)
    return kalman;
}
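/* Hedged usage sketch (an assumption, not part of the original source): one
   predict/correct step with the filter returned by initKalman(); observed_x and
   observed_y stand in for a real (x, y) measurement. */
void kalmanExample(float observed_x, float observed_y)
{
    CvKalman* kalman = initKalman(NULL);
    CvMat* measurement = cvCreateMat(2, 1, CV_32FC1);

    const CvMat* prediction = cvKalmanPredict(kalman, NULL);       /* prior state (x, y, vx, vy) */
    cvmSet(measurement, 0, 0, observed_x);
    cvmSet(measurement, 1, 0, observed_y);
    const CvMat* posterior = cvKalmanCorrect(kalman, measurement); /* corrected state */

    printf("predicted (%.1f, %.1f) corrected (%.1f, %.1f)\n",
           cvmGet(prediction, 0, 0), cvmGet(prediction, 1, 0),
           cvmGet(posterior, 0, 0), cvmGet(posterior, 1, 0));

    cvReleaseMat(&measurement);
    cvReleaseKalman(&kalman);
}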
/* Shadow Removal on the basis of Y correction and colour adjustment */
cv::Mat LaneDetector::shadowRemoval(cv::Mat &img)
{
    cv::Mat original_image = img;
    int shadow_mean = 0, non_shadow_mean = 0, count_shadow = 0, count_non_shadow = 0, difference;
    cv::Mat binary(original_image.rows, original_image.cols, CV_8UC1, cvScalarAll(0));
    cv::Mat image_ycrcb(original_image.rows, original_image.cols, CV_8UC3, cvScalarAll(0));
    cv::Mat final(original_image.rows, original_image.cols, CV_8UC3, cvScalarAll(0));

    cvtColor(original_image, image_ycrcb, CV_BGR2YCrCb);
    binary = shadowDetection(image_ycrcb);

    if (debug_mode > 0){
        cv::namedWindow("binary_image", 1);
        cv::imshow("binary_image", binary);
        cv::waitKey(20);
    }

    cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7), cv::Point(3, 3));
    dilate(binary, binary, element);
    element = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5), cv::Point(2, 2));
    erode(binary, binary, element);

    if (debug_mode > 0){
        cv::namedWindow("eroded_image", 1);
        cv::imshow("eroded_image", binary);
        cv::waitKey(20);
    }

    // accumulate the mean luma of shadow and non-shadow pixels
    for (int i = 0; i < image_ycrcb.rows; i++){
        for (int j = 0; j < image_ycrcb.cols; j++){
            if (binary.at<uchar>(i, j) == 0){
                shadow_mean += image_ycrcb.at<cv::Vec3b>(i, j)[0];
                count_shadow++;
            } else {
                non_shadow_mean += image_ycrcb.at<cv::Vec3b>(i, j)[0];
                count_non_shadow++;
            }
        }
    }

    if (count_shadow != 0 && count_non_shadow != 0){
        difference = non_shadow_mean/count_non_shadow - shadow_mean/count_shadow;
        for (int i = 0; i < image_ycrcb.rows; i++){
            for (int j = 0; j < image_ycrcb.cols; j++){
                if (binary.at<uchar>(i, j) == 0){
                    image_ycrcb.at<cv::Vec3b>(i, j)[0] += difference/2; // Y adjustment
                    image_ycrcb.at<cv::Vec3b>(i, j)[2] -= difference/6; // Colour adjustment
                }
            }
        }
        cvtColor(image_ycrcb, final, CV_YCrCb2BGR);
        return final;
    }

    /* assumed fallback (the original snippet stops above): when no shadow or no lit
       pixels were found, return the input unchanged */
    return original_image;
}
cv::Mat &RegionMask::QPolygon2Mask(cv::Mat &img, const QPolygon external, const QList<QPolygon> &holes)
{
    vector<vector<cv::Point> > points;
    Utils::QPolygon2CvPointArray(external, points);
    cv::fillPoly(img, points, cvScalarAll(255), 4);

    for(const QPolygon& hole : holes){
        Utils::QPolygon2CvPointArray(hole, points);
        cv::fillPoly(img, points, cvScalarAll(0), 4);
    }
    return img;
}
struct point* get_contour_points_from_image_with_size (const GdkPixbuf *image, int *size)
{
    IplImage *ipl_image, *ipl_gray;
    CvMemStorage *contours;
    CvSeq *first_contour;
    CvScalar black, white;
    struct point *result;

    black = cvScalarAll (0);
    white = cvScalarAll (255);

    ipl_image = pixbuf2ipl (image);
    ipl_gray = cvCreateImage (cvGetSize (ipl_image), ipl_image->depth, N_CHANNELS_GRAY);
    cvCvtColor (ipl_image, ipl_gray, CV_BGR2GRAY);
    cvThreshold (ipl_gray, ipl_gray, 127, 255, CV_THRESH_BINARY|CV_THRESH_OTSU);
    cvSmooth (ipl_gray, ipl_gray, CV_GAUSSIAN, 15, 15, 0, 0);

    contours = cvCreateMemStorage (0);
    first_contour = NULL;
    cvFindContours (ipl_gray, contours, &first_contour, sizeof (CvContour),
                    CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint (0,0));

    /* guard against images without any contour at all */
    if (first_contour == NULL)
      {
        *size = 0;
        cvReleaseImage (&ipl_image);
        cvReleaseImage (&ipl_gray);
        cvReleaseMemStorage (&contours);
        return NULL;
      }

    result = (struct point*) malloc (sizeof (struct point) * first_contour->total);
    for (int i = 0; i < first_contour->total; ++i)
      {
        CvPoint *contour_point;
        contour_point = CV_GET_SEQ_ELEM (CvPoint, first_contour, i);
        result[i].x = contour_point->x;
        result[i].y = contour_point->y;
      }
    *size = first_contour->total;

    cvReleaseImage (&ipl_image);
    cvReleaseImage (&ipl_gray);
    cvReleaseMemStorage (&contours);

    return result;
}
unsigned char p2p4Check(IplImage *source, int i, int j, int k, int l)
{
    unsigned char pixelShouldBeDeleted = YES;
    if (cvGet2D(source, i + k, j + l).val[0] == DELETED_PIXEL) {
        pixelShouldBeDeleted = NO;
        cvSet2D(source, i + k, j + l, cvScalarAll(WHITE_PIXEL));
        if (fourthCondition(source, i, j) == YES) {
            pixelShouldBeDeleted = YES;
        }
        cvSet2D(source, i + k, j + l, cvScalarAll(DELETED_PIXEL));
    }
    return pixelShouldBeDeleted;
}
void SetImageFloodFill(IplImage *img)
{
    /* seed the fill at the image centre; use stack values instead of leaked heap allocations */
    CvPoint seed = cvPoint(img->width / 2, img->height / 2);
    CvScalar fillColor = cvScalar(215, 59, 62, 0);
    cvFloodFill(img, seed, fillColor, cvScalarAll(7.0), cvScalarAll(7.0), NULL, 4, NULL);
}
CV_IMPL CvSeq* cvSegmentFGMask( CvArr* _mask, int poly1Hull0, float perimScale,
                                CvMemStorage* storage, CvPoint offset )
{
    CvMat mstub, *mask = cvGetMat( _mask, &mstub );
    CvMemStorage* tempStorage = storage ? storage : cvCreateMemStorage();
    CvSeq *contours, *c;
    int nContours = 0;
    CvContourScanner scanner;

    // clean up raw mask
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_OPEN, 1 );
    cvMorphologyEx( mask, mask, 0, 0, CV_MOP_CLOSE, 1 );

    // find contours around only bigger regions
    scanner = cvStartFindContours( mask, tempStorage,
        sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, offset );

    while( (c = cvFindNextContour( scanner )) != 0 )
    {
        double len = cvContourPerimeter( c );
        double q = (mask->rows + mask->cols)/perimScale; // calculate perimeter len threshold
        if( len < q ) // get rid of the blob if its perimeter is too small
            cvSubstituteContour( scanner, 0 );
        else // smooth its edges if it's large enough
        {
            CvSeq* newC;
            if( poly1Hull0 ) // polygonal approximation of the segmentation
                newC = cvApproxPoly( c, sizeof(CvContour), tempStorage, CV_POLY_APPROX_DP, 2, 0 );
            else // convex hull of the segmentation
                newC = cvConvexHull2( c, tempStorage, CV_CLOCKWISE, 1 );
            cvSubstituteContour( scanner, newC );
            nContours++;
        }
    }
    contours = cvEndFindContours( &scanner );

    // paint the found regions back into the image
    cvZero( mask );
    for( c = contours; c != 0; c = c->h_next )
        cvDrawContours( mask, c, cvScalarAll(255), cvScalarAll(0), -1,
                        CV_FILLED, 8, cvPoint(-offset.x, -offset.y));

    if( tempStorage != storage )
    {
        cvReleaseMemStorage( &tempStorage );
        contours = 0;
    }

    return contours;
}
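/* Hedged usage sketch (an assumption, not part of the OpenCV source above): count the
   cleaned-up foreground regions that cvSegmentFGMask() keeps, given an 8-bit mask
   produced by some background-subtraction step. Note that the mask is repainted in place. */
void countForegroundRegions(IplImage* fg_mask)
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    /* poly1Hull0 = 1 -> polygonal approximation, perimScale = 4.f (the usual default) */
    CvSeq* regions = cvSegmentFGMask(fg_mask, 1, 4.f, storage, cvPoint(0, 0));

    int n = 0;
    for (CvSeq* c = regions; c != 0; c = c->h_next)
        n++;
    printf("%d foreground regions kept\n", n);

    cvReleaseMemStorage(&storage);   /* also frees the returned contour sequence */
}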
// ***************************CHECKPOINT 1 Methods: Initialization**************************
// Initialization
int initialize(FILE * poseFile, struct pData * poses, char ** filenames, int file_count)
{
    int c, count, vecCount, matCount;
    char filename[45];
    char timeData[40];
    char poseData[200];

    poses[file_count].eye = cvScalarAll(0.0);
    poses[file_count].center = cvScalarAll(0.0);
    poses[file_count].up = cvScalarAll(0.0);

    // read the image name and append a ".jpg" extension
    count = 0;
    memset(filename, 0, sizeof(filename));
    while((c = fgetc(poseFile)) != ' ') {
        filename[count] = (char) c;
        count++;
    }
    filename[count] = '.';
    filename[count+1] = 'j';
    filename[count+2] = 'p';
    filename[count+3] = 'g';
    filenames[file_count] = (char*) malloc((strlen(filename) + 4) * sizeof(char));
    strcpy(filenames[file_count], filename);

    // read the timestamp field (currently unused)
    count = 0;
    memset(timeData, 0, sizeof(timeData));
    while((c = fgetc(poseFile)) != ' ') {
        timeData[count] = (char) c;
        count++;
    }

    // read the eye, center and up vectors (three values each)
    for (matCount = 0; matCount < 3; matCount++) {
        for (vecCount = 0; vecCount < 3; vecCount++) {
            count = 0;
            memset(poseData, 0, sizeof(poseData));
            while((c = fgetc(poseFile)) != ' ' && c != '\n') {
                poseData[count] = (char) c;
                count++;
            }
            if (matCount == 0) {
                poses[file_count].eye.val[vecCount] = atof(poseData);
            } else if (matCount == 1) {
                poses[file_count].center.val[vecCount] = atof(poseData);
            } else {
                poses[file_count].up.val[vecCount] = atof(poseData);
            }
        }
    }

    return 0; // success
}
void CV_MHIBaseTest::get_minmax_bounds( int i, int j, int type, CvScalar* low, CvScalar* high )
{
    CvArrTest::get_minmax_bounds( i, j, type, low, high );
    if( i == INPUT && CV_MAT_DEPTH(type) == CV_8U )
    {
        *low = cvScalarAll(cvRound(-1./silh_ratio)+2.);
        *high = cvScalarAll(2);
    }
    else if( i == mhi_i || i == mhi_ref_i )
    {
        *low = cvScalarAll(-exp(max_log_duration));
        *high = cvScalarAll(0.);
    }
}
void adjustHSV(IplImage *&src, int HuePosition, int SaturationPosition, int ValuePosition)
{
    int Hue = HuePosition;
    double Saturation = SaturationPosition * 2.55;
    double Value = ValuePosition / 100.;

    //create float image
    IplImage *temp = cvCreateImage(cvGetSize(src), IPL_DEPTH_32F, src->nChannels);
    cvConvertScale(src, temp, 1.0/255.0, 0);

    //split
    IplImage* floatingH = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1 );
    IplImage* floatingS = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1 );
    IplImage* floatingV = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1 );
    cvCvtColor(temp, temp, CV_BGR2HSV); //color convert
    cvSplit( temp, floatingH, floatingS, floatingV, NULL);

    //adjust
    cvAddS(floatingH, cvScalarAll(Hue), floatingH);
    cvAddS(floatingV, cvScalarAll(Value), floatingV);

    //merge
    cvZero(temp);
    cvMerge(floatingH, floatingS, floatingV, NULL, temp);
    cvCvtColor(temp, temp, CV_HSV2BGR);

    //save
    cvConvertScale( temp, src, 255, 0 );

    IplImage *HSV = convertImageRGBtoHSV(src);
    IplImage *H = cvCreateImage(cvGetSize(src), src->depth, 1);
    IplImage *S = cvCreateImage(cvGetSize(src), src->depth, 1);
    IplImage *V = cvCreateImage(cvGetSize(src), src->depth, 1);
    cvSplit(HSV, H, S, V, 0);
    cvAddS(S, cvScalarAll(Saturation), S);
    cvMerge(H, S, V, 0, HSV);

    cvReleaseImage(&src);
    src = convertImageHSVtoRGB(HSV);

    cvReleaseImage(&HSV);
    cvReleaseImage(&H);
    cvReleaseImage(&S);
    cvReleaseImage(&V);
    cvReleaseImage(&temp);
    cvReleaseImage(&floatingH);
    cvReleaseImage(&floatingS);
    cvReleaseImage(&floatingV);
} //end HSV
void showContent(IplImage * img)
{
    if(temp == NULL)
        temp = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    cvCopyImage(img, temp);

    for (int i = 0; i < InitContour.size(); i++)
    {
        cvCircle(temp, InitContour[i], 2, cvScalarAll(155));
        int j = (i + 1) % InitContour.size();
        cvLine( temp, InitContour[i], InitContour[j], cvScalarAll(100), 1);
    }

    cvSaveImage("sef.jpg", temp);
    cvShowImage( "srcImage", temp );
}
// convert the given image to binary using a fixed (or trackbar-controlled) threshold
cv::Mat applyThreshold(cv::Mat &img, int debug)
{
    // static so that the pointer handed to createTrackbar stays valid between calls
    static int bin_threshold = 180;
    cv::Mat grayscale_image(img.rows, img.cols, CV_8UC1, cvScalarAll(0));
    cv::Mat threshold_image(img.rows, img.cols, CV_8UC1, cvScalarAll(0));

    if (debug == 4) {
        cv::namedWindow("threshold_control_box", 1);
        cv::createTrackbar("bin_threshold", "threshold_control_box", &bin_threshold, 255);
    }

    cv::inRange(img, cv::Scalar(bin_threshold, bin_threshold, bin_threshold),
                cv::Scalar(256, 256, 256), threshold_image);
    return threshold_image;
}
void DrawResult(IplImage* pSourceImage, CvSize iTemplateSize, ShiftValue iShiftParam, double dColor) { if (pSourceImage == NULL) { printf("DrawResult input SourceImage is NULL"); return; } double dX = iShiftParam.dX; double dY = iShiftParam.dY; double dWidth = iTemplateSize.width; double dHeight = iTemplateSize.height; CvPoint iTopLeft, iTopRight, iBottomLeft, iBottomRight; iTopLeft = cvPoint(-dWidth / 2, -dHeight / 2); iTopRight = cvPoint(dWidth / 2, -dHeight / 2); iBottomLeft = cvPoint(-dWidth / 2, dHeight / 2); iBottomRight = cvPoint(dWidth / 2, dHeight / 2); CvPoint iTmpPoint; GetRotatedPoint(iTopLeft, iShiftParam.dAngle, iTmpPoint); iTopLeft = iTmpPoint; GetRotatedPoint(iTopRight, iShiftParam.dAngle, iTmpPoint); iTopRight = iTmpPoint; GetRotatedPoint(iBottomLeft, iShiftParam.dAngle, iTmpPoint); iBottomLeft = iTmpPoint; GetRotatedPoint(iBottomRight, iShiftParam.dAngle, iTmpPoint); iBottomRight = iTmpPoint; iTopLeft.x += dX; iTopRight.x += dX; iBottomLeft.x += dX; iBottomRight.x += dX; iTopLeft.y += dY; iTopRight.y += dY; iBottomLeft.y += dY; iBottomRight.y += dY; cvCircle(pSourceImage, cvPoint(dX, dY), 2, cvScalarAll(dColor)); cvCircle(pSourceImage, iTopLeft, 2, cvScalarAll(dColor)); cvCircle(pSourceImage, iTopRight, 2, cvScalarAll(dColor)); cvCircle(pSourceImage, iBottomLeft, 2, cvScalarAll(dColor)); cvCircle(pSourceImage, iBottomRight, 2, cvScalarAll(dColor)); cvLine(pSourceImage, iTopLeft, iTopRight, cvScalarAll(dColor)); cvLine(pSourceImage, iTopRight, iBottomRight, cvScalarAll(dColor)); cvLine(pSourceImage, iBottomRight, iBottomLeft, cvScalarAll(dColor)); cvLine(pSourceImage, iBottomLeft, iTopLeft, cvScalarAll(dColor)); }
CvSeq *reghand::filthull2(CvSeq *filted_elimhull)
{
    //CvSeq *filtedhullseq=cvCloneSeq(filted_elimhull);
    float maxdis = 0;
    CvPoint **fingpt = NULL;
    CvScalar mean, std = cvScalarAll(0);
    CvMat *dismat = cvCreateMat(1, filted_elimhull->total, CV_32FC1);
    CvPoint2D32f center = minrect.center;

    // distance of every hull point from the rectangle centre; remember the farthest one
    for (int i = 0; i < filted_elimhull->total; i++)
    {
        CvPoint **data = CV_GET_SEQ_ELEM(CvPoint*, filted_elimhull, i);
        CvPoint pt = **data;
        float dis = sqrt(pow(pt.x - center.x, 2) + pow(pt.y - center.y, 2));
        dismat->data.fl[i] = dis;
        if (dis > maxdis) { maxdis = dis; fingpt = data; }
    }
    cvAvgSdv(dismat, &mean, &std);
    cvReleaseMat(&dismat);

    if (filted_elimhull->total == 1 && maxdis > fingerTh * 0.5)
        return filted_elimhull;

    if (filted_elimhull->total == 2 && maxdis > fingerTh * 0.5 && std.val[0] < 10)
    {
        CvPoint startpt = **CV_GET_SEQ_ELEM(CvPoint*, filted_elimhull, 0);
        CvPoint endpt = **CV_GET_SEQ_ELEM(CvPoint*, filted_elimhull, 1);
        double bfang = atan(double(startpt.y - handcenter.y) / (startpt.x - handcenter.x)) * 180 / PI;
        if (bfang < 0) bfang += 180;
        double afang = atan(double(endpt.y - handcenter.y) / (endpt.x - handcenter.x)) * 180 / PI;
        if (afang < 0) afang += 180;

        if (fabs(bfang - afang) > 60)
        {
            // the two points are far apart in angle: keep only the farthest one
            cvClearSeq(filted_elimhull);
            cvSeqPush(filted_elimhull, fingpt);
            return filted_elimhull;
        }
        else
            return filted_elimhull;
    }

    /* assumed fallback (the original snippet stops above): return the hull sequence unchanged */
    return filted_elimhull;
}