Example #1
IplImage * find_macbeth( const char *img )
{
    IplImage * macbeth_img = cvLoadImage( img,
        CV_LOAD_IMAGE_ANYCOLOR|CV_LOAD_IMAGE_ANYDEPTH );
    if( !macbeth_img ) return NULL; // nothing to do if the image failed to load
        
    IplImage * macbeth_original = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, macbeth_img->nChannels );
    cvCopy(macbeth_img, macbeth_original);
        
    IplImage * macbeth_split[3];
    IplImage * macbeth_split_thresh[3];
    
    for(int i = 0; i < 3; i++) {
        macbeth_split[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
        macbeth_split_thresh[i] = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), macbeth_img->depth, 1 );
    }
    
    cvSplit(macbeth_img, macbeth_split[0], macbeth_split[1], macbeth_split[2], NULL);
    
    if( macbeth_img )
    {
        int adaptive_method = CV_ADAPTIVE_THRESH_MEAN_C;
        int threshold_type = CV_THRESH_BINARY_INV;
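        // cvAdaptiveThreshold needs an odd block size; OR-ing with 1 below guarantees that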
        int block_size = cvRound(
            MIN(macbeth_img->width,macbeth_img->height)*0.02)|1;
        fprintf(stderr,"Using %d as block size\n", block_size);
        
        double offset = 6;
        
        // do an adaptive threshold on each channel
        for(int i = 0; i < 3; i++) {
            cvAdaptiveThreshold(macbeth_split[i], macbeth_split_thresh[i], 255, adaptive_method, threshold_type, block_size, offset);
        }
        
        IplImage * adaptive = cvCreateImage( cvSize(macbeth_img->width, macbeth_img->height), IPL_DEPTH_8U, 1 );
        
        // OR the binary threshold results together
        cvOr(macbeth_split_thresh[0],macbeth_split_thresh[1],adaptive);
        cvOr(macbeth_split_thresh[2],adaptive,adaptive);
        
        for(int i = 0; i < 3; i++) {
            cvReleaseImage( &(macbeth_split[i]) );
            cvReleaseImage( &(macbeth_split_thresh[i]) );
        }
                
        int element_size = (block_size/10)+2;
        fprintf(stderr,"Using %d as element size\n", element_size);
        
        // do an opening on the threshold image
        IplConvKernel * element = cvCreateStructuringElementEx(element_size,element_size,element_size/2,element_size/2,CV_SHAPE_RECT);
        cvMorphologyEx(adaptive,adaptive,NULL,element,CV_MOP_OPEN);
        cvReleaseStructuringElement(&element);
        
        CvMemStorage* storage = cvCreateMemStorage(0);
        
        CvSeq* initial_quads = cvCreateSeq( 0, sizeof(*initial_quads), sizeof(void*), storage );
        CvSeq* initial_boxes = cvCreateSeq( 0, sizeof(*initial_boxes), sizeof(CvBox2D), storage );
        
        // find contours in the threshold image
        CvSeq * contours = NULL;
        cvFindContours(adaptive,storage,&contours);
        
        int min_size = (macbeth_img->width*macbeth_img->height)/
            (MACBETH_SQUARES*100);
        
        if(contours) {
            int count = 0;
            
            for( CvSeq* c = contours; c != NULL; c = c->h_next) {
                CvRect rect = ((CvContour*)c)->rect;
                // only interested in contours with these restrictions
                if(CV_IS_SEQ_HOLE(c) && rect.width*rect.height >= min_size) {
                    // only interested in quad-like contours
                    CvSeq * quad_contour = find_quad(c, storage, min_size);
                    if(quad_contour) {
                        cvSeqPush( initial_quads, &quad_contour );
                        count++;
                        rect = ((CvContour*)quad_contour)->rect;
                        
                        CvScalar average = contour_average((CvContour*)quad_contour, macbeth_img);
                        
                        CvBox2D box = cvMinAreaRect2(quad_contour,storage);
                        cvSeqPush( initial_boxes, &box );
                        
                        // fprintf(stderr,"Center: %f %f\n", box.center.x, box.center.y);
                        
                        double min_distance = MAX_RGB_DISTANCE;
                        CvPoint closest_color_idx = cvPoint(-1,-1);
                        for(int y = 0; y < MACBETH_HEIGHT; y++) {
                            for(int x = 0; x < MACBETH_WIDTH; x++) {
                                double distance = euclidean_distance_lab(average,colorchecker_srgb[y][x]);
                                if(distance < min_distance) {
                                    closest_color_idx.x = x;
                                    closest_color_idx.y = y;
                                    min_distance = distance;
                                }
                            }
                        }
                        
                        CvScalar closest_color = colorchecker_srgb[closest_color_idx.y][closest_color_idx.x];
                        // fprintf(stderr,"Closest color: %f %f %f (%d %d)\n",
                        //     closest_color.val[2],
                        //     closest_color.val[1],
                        //     closest_color.val[0],
                        //     closest_color_idx.x,
                        //     closest_color_idx.y
                        // );
                        
                        // cvDrawContours(
                        //     macbeth_img,
                        //     quad_contour,
                        //     cvScalar(255,0,0),
                        //     cvScalar(0,0,255),
                        //     0,
                        //     element_size
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*6,
                        //     cvScalarAll(255),
                        //     -1
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*6,
                        //     closest_color,
                        //     -1
                        // );
                        // cvCircle(
                        //     macbeth_img,
                        //     cvPointFrom32f(box.center),
                        //     element_size*4,
                        //     average,
                        //     -1
                        // );
                        // CvRect rect = contained_rectangle(box);
                        // cvRectangle(
                        //     macbeth_img,
                        //     cvPoint(rect.x,rect.y),
                        //     cvPoint(rect.x+rect.width, rect.y+rect.height),
                        //     cvScalarAll(0),
                        //     element_size
                        // );
                    }
                }
            }
            
            ColorChecker found_colorchecker;

            fprintf(stderr,"%d initial quads found", initial_quads->total);
            if(count > MACBETH_SQUARES) {
                fprintf(stderr," (probably a Passport)\n");
                
                CvMat* points = cvCreateMat( initial_quads->total , 1, CV_32FC2 );
                CvMat* clusters = cvCreateMat( initial_quads->total , 1, CV_32SC1 );
                
                CvSeq* partitioned_quads[2];
                CvSeq* partitioned_boxes[2];
                for(int i = 0; i < 2; i++) {
                    partitioned_quads[i] = cvCreateSeq( 0, sizeof(**partitioned_quads), sizeof(void*), storage );
                    partitioned_boxes[i] = cvCreateSeq( 0, sizeof(**partitioned_boxes), sizeof(CvBox2D), storage );
                }
                
                // set up the points sequence for cvKMeans2, using the box centers
                for(int i = 0; i < initial_quads->total; i++) {
                    CvBox2D box = (*(CvBox2D*)cvGetSeqElem(initial_boxes, i));
                    
                    cvSet1D(points, i, cvScalar(box.center.x,box.center.y));
                }
                
                // partition into two clusters: passport and colorchecker
                cvKMeans2( points, 2, clusters, 
                           cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,
                                           10, 1.0 ) );
        
                for(int i = 0; i < initial_quads->total; i++) {
                    CvPoint2D32f pt = ((CvPoint2D32f*)points->data.fl)[i];
                    int cluster_idx = clusters->data.i[i];
                    
                    cvSeqPush( partitioned_quads[cluster_idx],
                               cvGetSeqElem(initial_quads, i) );
                    cvSeqPush( partitioned_boxes[cluster_idx],
                               cvGetSeqElem(initial_boxes, i) );

                    // cvCircle(
                    //     macbeth_img,
                    //     cvPointFrom32f(pt),
                    //     element_size*2,
                    //     cvScalar(255*cluster_idx,0,255-(255*cluster_idx)),
                    //     -1
                    // );
                }
                
                ColorChecker partitioned_checkers[2];
                
                // check each of the two partitioned sets for the best colorchecker
                for(int i = 0; i < 2; i++) {
                    partitioned_checkers[i] =
                        find_colorchecker(partitioned_quads[i], partitioned_boxes[i],
                                      storage, macbeth_img, macbeth_original);
                }
                
                // use the colorchecker with the lowest error
                found_colorchecker = partitioned_checkers[0].error < partitioned_checkers[1].error ?
                    partitioned_checkers[0] : partitioned_checkers[1];
                
                cvReleaseMat( &points );
                cvReleaseMat( &clusters );
            }
            else { // just one colorchecker to test
                fprintf(stderr,"\n");
                found_colorchecker = find_colorchecker(initial_quads, initial_boxes,
                                  storage, macbeth_img, macbeth_original);
            }
            
            // render the found colorchecker
            draw_colorchecker(found_colorchecker.values,found_colorchecker.points,macbeth_img,found_colorchecker.size);
            
            // print out the colorchecker info
            for(int y = 0; y < MACBETH_HEIGHT; y++) {            
                for(int x = 0; x < MACBETH_WIDTH; x++) {
                    CvScalar this_value = cvGet2D(found_colorchecker.values,y,x);
                    CvScalar this_point = cvGet2D(found_colorchecker.points,y,x);
                    
                    printf("%.0f,%.0f,%.0f,%.0f,%.0f\n",
                        this_point.val[0],this_point.val[1],
                        this_value.val[2],this_value.val[1],this_value.val[0]);
                }
            }
            printf("%0.f\n%f\n",found_colorchecker.size,found_colorchecker.error);
            
        }
                
        cvReleaseMemStorage( &storage );
        
        if( macbeth_original ) cvReleaseImage( &macbeth_original );
        if( adaptive ) cvReleaseImage( &adaptive );
        
        return macbeth_img;
    }

    if( macbeth_img ) cvReleaseImage( &macbeth_img );

    return NULL;
}
Example #2
bool photometric_calibration(CalibModel &model, CvCapture *capture, 
			     int nbImages, bool cache)
{

  if (cache) model.map.load();

  const char *win = "BazAR";

  IplImage*gray=0;

  cvNamedWindow(win, CV_WINDOW_AUTOSIZE);
  cvNamedWindow("LightMap", CV_WINDOW_AUTOSIZE);

  IplImage* frame = 0;
  IplImage* display=cvCloneImage(cvQueryFrame(capture));

  int nbHomography =0;
  LightCollector lc(model.map.reflc);
  IplImage *lightmap = cvCreateImage(cvGetSize(model.map.map.getIm()), IPL_DEPTH_8U, 
				     lc.avgChannels);
  while (1)
    {
      // acquire image
      frame = cvQueryFrame( capture );
      /*
	if (frame) cvReleaseImage(&frame);
	frame = cvLoadImage("model.bmp",1);
      */
      if( !frame )
	break;

      // convert it to gray levels, if required
      if (frame->nChannels >1) {
	if( !gray ) 
	  gray = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
	cvCvtColor(frame, gray, CV_RGB2GRAY);
      } else {
	gray = frame;
      }

      // run the detector
      if (model.detector.detect(gray)) {
	// 2d homography found
	nbHomography++;

	// Computes 3D pose and surface normal
	model.augm.Clear();
	add_detected_homography(model.detector, model.augm);
	model.augm.Accomodate(4, 1e-4);
	CvMat *mat = model.augm.GetObjectToWorld();
	float normal[3];
	for (int j=0;j<3;j++) normal[j] = cvGet2D(mat, j, 2).val[0];
	cvReleaseMat(&mat);

	// average pixels over triangles
	lc.averageImage(frame,model.detector.H);

	// add observations
	if (!model.map.isReady())
	  model.map.addNormal(normal, lc, 0);

	if (!model.map.isReady() && nbHomography >= nbImages) {
	  if (model.map.computeLightParams()) {
	    model.map.save();
	    const float *gain = model.map.getGain(0);
	    const float *bias = model.map.getBias(0);
	    cout << "Gain: " << gain[0] << ", " << gain[1] << ", " << gain[2] << endl;
	    cout << "Bias: " << bias[0] << ", " << bias[1] << ", " << bias[2] << endl;
	  }
	} 
      } 
		
      if (model.map.isReady()) {
	double min, max;
	IplImage *map = model.map.map.getIm();
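	// use the channel-of-interest so cvMinMaxLoc only looks at channel 2, then reset it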
	cvSetImageCOI(map, 2);
	cvMinMaxLoc(map, &min, &max);
	cvSetImageCOI(map, 0);
	assert(map->nChannels == lightmap->nChannels);
	cvConvertScale(map, lightmap, 128, 0);
	cvShowImage("LightMap", lightmap);
	augment_scene(model, frame, display);
      } else {
	cvCopy(frame,display);
	if (model.detector.object_is_detected)
	  lc.drawGrid(display, model.detector.H);
      }

      cvShowImage(win, display);

      int k=cvWaitKey(10);
      if (k=='q' || k== 27)
	break;
    }

  cvReleaseImage(&lightmap);
  cvReleaseImage(&display);
  if (gray && gray != frame) // gray was allocated only when frames had more than one channel
    cvReleaseImage(&gray);
  return 0;
}
Example #3
void DB_Analysis::OnBnClickedBtnSame()
{
	Count_File = 0;
	C_state.SetWindowTextA(_T("중복처리 진행중..."));
	UpdateData(false);

	CFileFind finder;
	BOOL bWorking = finder.FindFile(_T("C:\\output\\*.bmp"));
	while (bWorking)
	{
		bWorking = finder.FindNextFile();
		if (finder.IsDots()) continue;
		Count_File++;
	}
	finder.Close();

	// TODO: Add your control notification handler code here.
	CString first[103]; // values computed for the first (reference) image
	CString second[103]; // values computed for the image being compared
	IplImage *src_image;// image to be loaded
	int temp_sum[100] = { 0, }; // temporary storage for the per-block averages

	CString new_image_path("C:\\output\\");
	char cut_file_name[13];
	sprintf(cut_file_name, "%05d.bmp", 1);
	DB_array = new CString*[Count_File];
	for (int i = 0; i < Count_File; i++)
	{
		DB_array[i] = new CString[103];
	}
	first[0] = new_image_path + cut_file_name;
	first[1].Format("%d", 1);

	src_image = cvLoadImage(first[0]);

	///Split the image into a 10 x 10 grid and store the average value of each block.
	int real_width, real_height;
	CvScalar temp_scalar;

	for (int y_block = 0; y_block < 10; y_block++)
	{
		for (int x_block = 0; x_block < 10; x_block++)
		{
			for (int col = 0; col < 64; col++)
			{
				for (int row = 0; row < 48; row++)
				{
					real_width = x_block * 64 + col;
					real_height = y_block * 48 + row;
					temp_scalar = cvGet2D(src_image, real_height, real_width);
					temp_sum[x_block + 10 * y_block] += (temp_scalar.val[0] + temp_scalar.val[1] + temp_scalar.val[2]) / 3.0f;
				}
			}
		}
	}
	for (int i = 0; i < 100; i++)
	{
		temp_sum[i] = temp_sum[i] / 3185;
		first[i + 2].Format("%d", temp_sum[i]);
	}
	for (int i = 0; i < 102; i++)
	{
		DB_array[0][i].Format(first[i]);
	}
	cvReleaseImage(&src_image);

	for (int j = 2; j <= Count_File; j++)
	{
		sprintf(cut_file_name, "%05d.bmp", j);
		second[0] = new_image_path + cut_file_name;
		second[1].Format("%d", j);
		src_image = cvLoadImage(second[0]);
		memset(temp_sum, 0, sizeof(temp_sum)); // reset the per-block accumulators before processing this image
		for (int y_block = 0; y_block < 10; y_block++)
		{
			for (int x_block = 0; x_block < 10; x_block++)
			{
				for (int col = 0; col < 64; col++)
				{
					for (int row = 0; row < 48; row++)
					{
						real_width = x_block * 64 + col;
						real_height = y_block * 48 + row;
					    temp_scalar = cvGet2D(src_image, real_height, real_width);
						temp_sum[x_block + 10 * y_block] += (temp_scalar.val[0] + temp_scalar.val[1] + temp_scalar.val[2]) / 3.0f;
					}
				}
			}
		}
		for (int i = 0; i < 100; i++)
		{
			temp_sum[i] = temp_sum[i] / 3185;
			second[i + 2].Format("%d", temp_sum[i]);
		}
		cvReleaseImage(&src_image);

		int count_differnet = 0;
		for (int count = 0; count < 100; count++)
		{
			if (atoi(first[2 + count]) != temp_sum[count])
				count_differnet++;
			if (count_differnet>5) break;
		}

		if (count_differnet > 5)
		{
			number_array++;
			for (int i = 0; i < 102; i++)
			{
				DB_array[number_array][i].Format(second[i]);
				first[i].Format(second[i]);
			}
		}
	}
	C_state.SetWindowTextA("중복처리 완료");
	CString count, time;
	count.Format("%d 개", Count_File - number_array);
	time.Format("%f 초", (float)(Count_File*0.06667));
	Same_count.SetWindowTextA(count);
	C_time.SetWindowTextA(time);
	UpdateData(false);
}
Example #4
void display()
{
	glClearColor(0.0, 0.0, 0.0, 0.0);
    	glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);


/*	glPushMatrix();
	glTranslatef(xavg,yavg,0);
	glutSolidCube(200);
	glPopMatrix();
/*

	glBegin(GL_QUADS);
		glVertex3f(xr,xb,0);	
		glVertex3f(xb,yb,0);	
		glVertex3f(xl,yl,0);
		glVertex3f(xt,yt,0);
	glEnd();
*/
///////////////////////////////////////////////////////////nishanthprakash20///////////////////////////////////////////////////
	captured=cvQueryFrame(video1);
	disp=cvCreateImage(cvGetSize(captured),IPL_DEPTH_8U,3);
	cvZero(disp);	// cvCreateImage leaves the buffer uninitialised; only matching pixels are set below
	eroded=cvCreateImage(cvGetSize(captured),IPL_DEPTH_8U,3);	
	dilated=cvCreateImage(cvGetSize(captured),IPL_DEPTH_8U,3);

//	data=cvGet2D(captured,240,320);
//	printf("%f,%f,%f\n",data.val[0],data.val[1],data.val[2]);
	

	thresh1=150;
	thresh2=100;
	thresh3=100;


	for(i=0;i<disp->height;i++)
	for(j=0;j<disp->width;j++)
		{
			data=cvGet2D(captured,i,j);
			
			if(data.val[1]>thresh1&&data.val[2]<thresh2&&data.val[0]<thresh3)
			{	
				cvSet2D(disp,i,j,data);
			}
		}

	cvErode(disp,eroded,NULL,1);
	cvDilate(eroded,dilated,NULL,4);
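	// scan rows from the top for the first pixel passing the colour test (xt,yt),
	// then from the bottom up for the first such pixel (xb,yb)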
	for(i=0;i<disp->height;i++)
	for(j=0;j<disp->width;j++)
		{
			data=cvGet2D(dilated,i,j);
			
			if(data.val[1]>thresh1&&data.val[2]<thresh2&&data.val[0]<thresh3)
			{	goto donetop;
				
			}
		}
	donetop:
	xt=j;
	yt=i;
	
	for(i=479;i>0;i--)
	for(j=0;j<disp->width;j++)
		{
			data=cvGet2D(dilated,i,j);
			
			if(data.val[1]>thresh1&&data.val[2]<thresh2&&data.val[0]<thresh3)
			{	goto doneleft;
				
			}
		}
	doneleft:
	xb=j;
	yb=i;
	
	inclination=((float)atan((yt-yb)/(xt-xb))-(float)atan(10.0/21))*180/3.14;
	if(inclination<0)	inclination+=60;
	printf("%f\n",inclination);
	
	cvNamedWindow("Cap");
	cvShowImage("Cap",dilated);
	cvWaitKey(3);

//*/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    	glColor3f(1.0, 1.0, 1.0);
	glPushMatrix();
	glTranslatef(0,0,-5);
	glRotatef(inclination,0,0,1);

	glScalef(100,100,100);
	glColor3f(0.0f,0.0f,0.0f);
	drawmodel_box();
		glColor3f(1.0f,1.0f,1.0f);
		drawmodel_box2();
		glColor3f(1.0f,1.0f,1.0f);
		drawmodel_box3();
		glColor3f(1.0f,1.0f,1.0f);
		drawmodel_box4();
		glColor3f(0.2f,0.2f,1.0f);
		drawmodel_box5();									//remove this
	//glScalef(0.01,0.01,0.01);
//glTranslatef(0,0,5);
	glPopMatrix();
	
  	glutSwapBuffers();
}
int _tmain(int argc, _TCHAR* argv[])
{
	int i, j, n, m;
	CvScalar tempValue;
	double S= 0.0;
	double Mask[3][3] = {{255., 255., 255.}, {255., 255., 255.}, {255., 255., 255.}};
	// mask for the erosion operation

	IplImage *inputImage = cvLoadImage("lena.bmp", CV_LOAD_IMAGE_GRAYSCALE);
	IplImage *binaryImage = cvCreateImage(cvGetSize(inputImage), 8, 1);
	IplImage *tempImage = cvCreateImage(cvSize(inputImage->width+2, inputImage->height+2), 8, 1);
	cvSetZero(tempImage); // clear the 1-pixel border that the copy loop below never writes
	IplImage *outputImage = cvCreateImage(cvGetSize(inputImage), 8, 1);

	for(i=0; i<inputImage->height; i++){ // binarize the source image
		for(j=0; j<inputImage->width; j++){
			tempValue = cvGet2D(inputImage, i, j);
			if(tempValue.val[0] > THRESHOLD) {
				cvSet2D(binaryImage, i, j, cvScalar(255));
			}	else	{
				cvSet2D(binaryImage, i, j, cvScalar(0));
			}
		}
	}

	for(i=0; i<binaryImage->height; i++) {
		for(j=0; j<binaryImage->width; j++){
			cvSet2D(tempImage, i+1, j+1, cvGet2D(binaryImage, i, j));
		}
	}

	for(i=0; i<binaryImage->height; i++){
		for(j=0; j<binaryImage->width; j++){
			for(n=0; n<3; n++){
				for(m=0; m<3; m++){
					tempValue = cvGet2D(tempImage, i+n, j+m);
					if(Mask[n][m] == tempValue.val[0]){
						// check whether this pixel matches the mask value
						S += 1.0;
					}
				}
			}
			if(S==9.0){
				cvSet2D(outputImage, i, j, cvScalar(255));
				// if all nine values match, the output is 255
			}	else {
				cvSet2D(outputImage, i, j, cvScalar(0));
				// if they do not all match, the output is 0
			}
			S =0.0; // reset
		}
	}

	cvShowImage("Input Image", inputImage);
	cvShowImage("Binary Image", binaryImage);
	cvShowImage("Output Image", outputImage);

	cvWaitKey();

	cvReleaseImage(&inputImage);
	cvReleaseImage(&binaryImage);
	cvReleaseImage(&tempImage);
	cvReleaseImage(&outputImage);

	return 0;
}
Example #6
CvScalar MaskShift::GetPixelValue(int x, int y)
{
	return cvGet2D(_imageData, y + _shift.y, x + _shift.x);
}
Example #7
IplImage *preImg(std::string caminho) {

    int quadratico = 200;
    CvSize tamanho = cvSize(quadratico, quadratico);

    IplImage *in = cvLoadImage(caminho.c_str(), CV_LOAD_IMAGE_GRAYSCALE);

    IplImage *src = cvCreateImage(tamanho, in->depth, in->nChannels);
    IplImage *dst = cvCreateImage(tamanho, in->depth, in->nChannels);
    IplImage *fn = cvCreateImage(cvSize(mh, mw), in->depth, in->nChannels);

    cvResize(in, src);

    cvThreshold(src, src, 220, 255, CV_THRESH_BINARY);

    cvShowImage("tresh", src);

    cvCanny(src, src, 100, 120, 3);

    //cvShowImage("canny", src);

    IplConvKernel *kernel = cvCreateStructuringElementEx(4, 4, 0, 0, CV_SHAPE_RECT);
    cvMorphologyEx(src, src, 0, kernel, CV_MOP_DILATE, 1);
    cvReleaseStructuringElement(&kernel);

    //cvShowImage("Dilatacao", src);

    std::vector<CvPoint> pontos;
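    // collect every white (255) pixel left by Canny + dilation; cvGet2D takes (row, col),
    // so the swapped indices below only work because the image was resized to a square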

    for (int y = 0; y < src->height; y++) {
        for (int x = 0; x < src->width; x++) {

            if (cvGet2D(src, x, y).val[0] == 255) {

                //axis inversion (x and y swapped)
                pontos.push_back(cvPoint(y, x));
            }

        }
    }

    std::sort(pontos.begin(), pontos.end(), sortPontos);

    CvPoint interpol = getInterpolado(pontos[0], pontos[pontos.size() - 1]);

//	CvScalar color = cvScalar(255, 255, 255);
//	int radius = 6;
//	int thickness = 2;
//
//	cvCircle(src, pontos[0], radius, color, thickness);
//
//	cvCircle(src, pontos[pontos.size() - 1], radius, color, thickness);

//cvCircle(src, interpol, radius, color, thickness);

//	std::cout << cvGetReal2D(src, pontos.begin()->x, pontos.begin()->y)
//			<< std::endl;

//	cvShowImage("teste", src);

//-----------------------------

    cvLogPolar(src, dst, cvPoint2D32f(interpol.x, interpol.y), 40,
               CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS);

    //cvNamedWindow("log-polar", 1);

    //cvShowImage("log-polar", dst);

    //cvShowImage("LogPolar",dst);

    cvResize(dst, fn);

    //cvShowImage("teste saida", fn);

    // free the intermediate images; only the resized result is returned
    cvReleaseImage(&in);
    cvReleaseImage(&src);
    cvReleaseImage(&dst);

    return fn;

}
Example #8
// Parameter: nCuster is the number of clusters
int color_cluster(char *filename,int nCuster )
{
	IplImage* img=cvLoadImage(filename);

	int i,j;
	CvMat *samples=cvCreateMat((img->width)*(img->height),1,CV_32FC3);// sample matrix: CV_32FC3 = 32-bit float, 3 channels (colour image)
	CvMat *clusters=cvCreateMat((img->width)*(img->height),1,CV_32SC1);// cluster-label matrix: CV_32SC1 = 32-bit signed int, 1 channel

	int k=0;
	for (i=0;i<img->width;i++)
	{
		for (j=0;j<img->height;j++)
		{
			CvScalar s;
			//read the pixel's three channel values (BGR)
			s.val[0]=(float)cvGet2D(img,j,i).val[0];//B
			s.val[1]=(float)cvGet2D(img,j,i).val[1];//G
			s.val[2]=(float)cvGet2D(img,j,i).val[2];//R
			cvSet2D(samples,k++,0,s);//store the pixel's three channel values into the sample matrix in order
		}
	}

	//number of clusters; this could later be determined automatically
	cvKMeans2(samples,nCuster,clusters,cvTermCriteria(CV_TERMCRIT_ITER,100,1.0));//run k-means: at most 100 iterations, accuracy 1.0

	//create the image used to display the clustering result
	IplImage *binimg=cvCreateImage(cvSize(img->width,img->height),IPL_DEPTH_8U,1);
	
	//create images for displaying each cluster separately
	IplImage *cluster_img0=cvCreateImage(cvSize(img->width,img->height),IPL_DEPTH_8U,1);
	IplImage *cluster_img1=cvCreateImage(cvSize(img->width,img->height),IPL_DEPTH_8U,1);
	IplImage *cluster_img2=cvCreateImage(cvSize(img->width,img->height),IPL_DEPTH_8U,1);



	k=0;
	int val=0;
	float step=255.0f/(nCuster-1);	// floating-point division

	CvScalar bg={255,0,0,0};//background set to white
	for (i=0;i<img->width;i++)
	{
		for (j=0;j<img->height;j++)
		{
			cvSet2D(cluster_img0,j,i,bg);
			cvSet2D(cluster_img1,j,i,bg);
			cvSet2D(cluster_img2,j,i,bg);
		}
	}
	for (i=0;i<img->width;i++)
	{
		for (j=0;j<img->height;j++)
		{
			val=(int)clusters->data.i[k++];
			CvScalar s;
			s.val[0]=255-val*step;//give each cluster label a different gray value
			cvSet2D(binimg,j,i,s);	

			//separate each cluster into its own image
			switch(val)
			{
				case 0:
					cvSet2D(cluster_img0,j,i,s);break;//white cluster
				case 1:
					cvSet2D(cluster_img1,j,i,s);break;//gray cluster
				case 2:
					cvSet2D(cluster_img2,j,i,s);break;//black cluster
				default:
					break;
			}	
		
		}
    }


	cvSaveImage("PicVideo//cluster_img0.png",cluster_img0);
	cvSaveImage("PicVideo//cluster_img1.png",cluster_img1);
	cvSaveImage("PicVideo//cluster_img2.png",cluster_img2);


	cvNamedWindow( "原始图像", 1 ); 
	cvShowImage( "原始图像", img  );


	cvNamedWindow( "聚类图像", 1 ); 
	cvShowImage( "聚类图像", binimg  );
	cvSaveImage("PicVideo//clusterimg.png",binimg);
	cvWaitKey(0); //wait for a key press

	cvDestroyWindow( "原始图像" );
	cvDestroyWindow( "聚类图像" );

	cvReleaseImage( &img ); 
	cvReleaseImage( &binimg );
	cvReleaseImage(&cluster_img0);
	cvReleaseImage(&cluster_img1);
	cvReleaseImage(&cluster_img2);

	return 0;

}
Example #9
ImageRAII hysteresis( IplImage * image, IplImage * orientation, std::pair< int, int > thresh )
{
	const char * WINDOW_NAME = "Hysteresis Threshold";

	CvSize image_size = cvGetSize( image );
	ImageRAII hysteresis_image( cvCreateImage( image_size, image->depth, image->nChannels ) );
	// key: pixel position
	// value: visited = true, unvisited = false
	std::map< CvPoint, bool, classcomp > pixels;
	std::map< CvPoint, bool, classcomp >::iterator it;
	std::vector< std::vector< CvPoint > > edges;

	// initialize map
	for( int i = 0; i < image_size.width; i++ )
	{
		for( int j = 0; j < image_size.height; j++ )
		{
			pixels[cvPoint( i, j )] = false;
		}
	}

	// visit all pixels
	for( it = pixels.begin(); it != pixels.end(); it++ )
	{
		std::vector< CvPoint > edge;
		// find the next unvisited pixel above the high threshold
		while( it != pixels.end() &&
			!( it->second == false && check_boundaries( image, it->first ) && cvGet2D( image, it->first.y, it->first.x ).val[0] > thresh.second ) )
		{
			it++;
		}
		if( it == pixels.end() )
			break;

		// mark pixel as visited
		CvPoint current_pixel = it->first;
		it->second = true;
		edge.push_back( current_pixel );

		// follow links forward
		std::pair< CvPoint, CvPoint > positions = get_edge_positions( orientation, current_pixel.x, current_pixel.y );

		// go forward
		CvPoint forward = positions.first;
		while( check_boundaries( image, forward ) && cvGet2D( image, forward.y, forward.x ).val[0] > thresh.first )
		{
			// mark pixel as visited
			edge.push_back( forward );
			pixels.find( forward )->second = true;
			
			std::pair< CvPoint, CvPoint > forward_positions = get_edge_positions( orientation, forward.x, forward.y );
			forward = forward_positions.first;
		}

		// go backward
		CvPoint backward = positions.second;
		while( check_boundaries( image, backward ) && cvGet2D( image, backward.y, backward.x ).val[0] > thresh.first )
		{
			// mark pixel as visited
			edge.push_back( backward );
			pixels.find( backward )->second = true;

			std::pair< CvPoint, CvPoint > backward_positions = get_edge_positions( orientation, backward.x, backward.y );
			backward = backward_positions.second;
		}

		// store this edge
		edges.push_back( edge );
	}

	int size = 0;
	// set the edges in the image
	std::vector< std::vector< CvPoint > >::iterator edges_iterator;
	for( edges_iterator = edges.begin(); edges_iterator < edges.end(); edges_iterator++ )
	{
		std::vector< CvPoint >::iterator edge_iterator;
		std::vector< CvPoint > edge = *edges_iterator;
		for( edge_iterator = edge.begin(); edge_iterator < edge.end(); edge_iterator++ )
		{
			size++;
			CvPoint pixel = *edge_iterator;
			CvScalar e;
			e.val[0] = GRAY;
			cvSet2D( hysteresis_image.image, pixel.y, pixel.x, e );
		}
	}

	cvNamedWindow( WINDOW_NAME );
	cvShowImage( WINDOW_NAME, hysteresis_image.image );
	cvMoveWindow( WINDOW_NAME, image_size.width * 3, 0 );

	return hysteresis_image;
}
Example #10
int writeDataToImage(IplImage *img, PixelMap *pixelmap, FILE *file, ConfigInfo config, unsigned int useable_pixels)
{

	CvScalar pixel;
	int index, number;
	int i,j, written_bytes=0;
	int message_length;
	unsigned int img_capacity;
	int byte; // int rather than char so the EOF comparison in the loop below is reliable

	int img_channels = 0;

	if(img->nChannels == 1)
	{
		img_channels = 1;
	}
	if(img->nChannels >= 3){
		img_channels = 3;
	}

	img_capacity = (unsigned int)( ((useable_pixels * img_channels) - MESSAGE_LENGTH_BITS) / BYTE );

	message_length = getFileLength(file);

	if(img_capacity < message_length)
	{
		printf("Image capacity is only: %u Byte -> message size is: %d\n", img_capacity, message_length);
		return 0;
	}

	printf("Image capacity is: %u Byte\n", img_capacity);
	printf("Message size is: %d Byte\n", message_length);


	DEBUG( ("writing message length bits to image\n") );

	for(i=MESSAGE_LENGTH_BITS-1; i>=0; )
	{

		//get next free pixel
		do
		{
			number = getNextRandom();

			index = number % useable_pixels;
		}
		while( pixelmap[index].used == TRUE );

		//get values at this place
		pixel = cvGet2D(img, pixelmap[index].x_coord, pixelmap[index].y_coord);


		//if we have more than one channel
		for(j=0; j<img_channels; j++)
		{
			pixel.val[j] = LSB_BIT( (int)pixel.val[j],  ((message_length >> i) & 1) );

			DEBUG( ("%u", ((message_length >> i) & 1)) );

			i--;

			if(i < 0)
			{
				break;
			}
		}

		cvSet2D(img, pixelmap[index].x_coord, pixelmap[index].y_coord, pixel);

		pixelmap[index].used = TRUE;

	}
	DEBUG( ("\n") );

	getNextRandom();//fix -> asynchron


	/////////////////////////////////////////////////////////////////////////////
	DEBUG( ("writing bytes to image...") );

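	// start j past the last channel so the first pass through the loop below fetches a fresh pixel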
	j=3;

	while( (EOF != (byte = getByteFromFile(file))) && (written_bytes < img_capacity) && (written_bytes < message_length) )
	{

		for(i=BYTE-1; i>=0; )
		{
			if(j>=img_channels)
			{
				j=0;

				do
				{
					number = getNextRandom();

					index = number % useable_pixels;
				}
				while( pixelmap[index].used == TRUE );

				pixel = cvGet2D(img, pixelmap[index].x_coord, pixelmap[index].y_coord);
			}
			else
			{
				j++;
			}


			while(j<img_channels)
			{
				pixel.val[j] = LSB_BIT( (int)pixel.val[j],  ((byte >> i) & 1) );

				i--;

				if(i < 0)
				{
					break;
				}

				j++;
			}

			cvSet2D(img, pixelmap[index].x_coord, pixelmap[index].y_coord, pixel);

			pixelmap[index].used = TRUE;
		}

		written_bytes++;

	}
	DEBUG( ("done\n") );


	printf("%d Bytes written to image\n", written_bytes);


	return written_bytes;
}
Example #11
void cveGet2D(CvArr* arr, int idx0, int idx1, CvScalar* value)
{
   *value = cvGet2D(arr, idx0, idx1);
}
Example #12
int readDataFromImage(IplImage *img, PixelMap *pixelmap, FILE *file, ConfigInfo config, unsigned int useable_pixels)
{
	CvScalar pixel;
	int index, number;
	int i,j;
	unsigned int message_length=0;
	unsigned int img_capacity;
	unsigned int extracted_bytes=0;
	char byte=0;

	int img_channels = 0;

	if(img->nChannels == 1)
	{
		img_channels = 1;
	}
	if(img->nChannels >= 3){
		img_channels = 3;
	}

	img_capacity = (unsigned int)( ((useable_pixels * img_channels) - MESSAGE_LENGTH_BITS) / BYTE );

	printf("Image capacity is: %u Byte\n", img_capacity);


	initFile(file);


	DEBUG( ("extracting message size bits \n") );

	for(i=0; i<=MESSAGE_LENGTH_BITS; )
	{

		//get next free pixel
		do
		{
			number = getNextRandom();

			index = number % useable_pixels;
		}
		while( pixelmap[index].used == TRUE );

		//get values at this place
		pixel = cvGet2D(img, pixelmap[index].x_coord, pixelmap[index].y_coord);

		//if we have more than one channel
		for(j=0; j<img_channels; j++)
		{
			message_length |= (int)pixel.val[j] & 1;

			i++;

			if(i < MESSAGE_LENGTH_BITS)
			{
				message_length <<= 1;
			}
			else
			{
				break;
			}

			DEBUG( ("%u", ((int)pixel.val[j] & 1) ) );
		}

		pixelmap[index].used = TRUE;

	}
	DEBUG( ("\n") );

	if(message_length > img_capacity)
	{
		printf("The embedded message size (%d) does not fix to image's capacity (%d)\n", message_length, img_capacity);
		printf("Please check if you have chosen the correct parameters and password\n");
		return 0;
	}

	printf("Integrated message size is: %d Byte\n", message_length);


	////////////////////////////////////////////////////////////////////////////////////////
	DEBUG( ("extracting bytes out of image...") );

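	// start j past the last channel so the first pass through the loop below fetches a fresh pixel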
	j=3;

	while( extracted_bytes < message_length )
	{
		byte=0;

		for(i=0; i<BYTE; )
		{

			if(j>=img_channels)
			{
				j=0;

				do
				{
					number = getNextRandom();

					index = number % useable_pixels;
				}
				while( pixelmap[index].used == TRUE );

				pixel = cvGet2D(img, pixelmap[index].x_coord, pixelmap[index].y_coord);
			}
			else
			{
				j++;
			}


			while(j<img_channels)
			{
				byte |= (int)pixel.val[j] & 1;

				i++;

				if(i < BYTE)
				{
					byte <<= 1;
				}
				else
				{
					break;
				}

				j++;
			}

			pixelmap[index].used = TRUE;
		}

		writeByteToFile(file, byte);

		extracted_bytes++;

	}
	DEBUG( ("done\n") );


	printf("%d Bytes extracted from image\n", extracted_bytes);


	return extracted_bytes;
}
Example #13
int main (int argc, char ** argv)
{
	const char *fileName = "../Images/lena.jpg";

	double** V;
	double** VDCT;
	int i;
	int j;
	CvScalar s;
	CvScalar s_img;

	if (argc > 1)
	{
		fileName = argv[1];
	}

	IplImage* src = cvLoadImage ( fileName, 3);
	IplImage* dct = cvCreateImage ( cvGetSize(src) , src->depth, src->nChannels);
	IplImage* img = cvCreateImage( cvGetSize(src), src->depth, 1);

	/**
	*Y  =      (0.257 * R) + (0.504 * G) + (0.098 * B) + 16
	*Cr = V =  (0.439 * R) - (0.368 * G) - (0.071 * B) + 128
	*Cb = U = -(0.148 * R) - (0.291 * G) + (0.439 * B) + 128
	**/	
	
	cvCvtColor (src, dct, CV_BGR2HSV);

	/**
	*We will work on the V channel
	**/
	

	//Allocate the matrix corresponding to the V channel

	V = (double**) malloc(dct->width* sizeof(double*));

	for (i = 0; i < dct->width; i++)
	{
		V[i] = (double*) malloc(dct->height*sizeof(double));
	}

	//Initialize the V matrix to 0

	for (i = 0; i <src->width; i++)
	{
		for (j = 0; j < src->height; j++)
		{
			V[i][j] = 0;
		}
	}

	//Read the V channel values into the matrix associated with it

	for (i = 0; i < src->width; i++)
	{
		for (j = 0; j < src->height; j++)
		{
			s = cvGet2D(dct,i,j);
			V[i][j] = s.val[2];
		}
	}
	

	//Allocate the VDCT matrix (second method)

	VDCT = alocamd( src->width, src->height);

	//Initialize VDCT to 0

	for (i = 0; i <src->width; i++)
	{
		for (j = 0; j < src->height; j++)
		{
			VDCT[i][j] = 0;
		}
	}

	//DCT transform of the matrix V (the "V" channel) into the matrix VDCT

	dct2dim( V, VDCT, src->width, src->height);

	//Store the DCT-transformed V channel (and only that channel) in channel 0 of img

	for (i = 0; i < src->width; i++)
	{
		for (j = 0; j < src->height; j++)
		{
			s_img = cvGet2D(img,i,j);
			s_img.val[0] = VDCT[i][j];
			cvSet2D(img, i, j, s_img);
		}
	}

	//Display

	cvNamedWindow("YUV", CV_WINDOW_AUTOSIZE);
	cvShowImage("YUV", img);
	cvWaitKey(0);
	cvDestroyAllWindows();
}
int main_video_mask_alpha_join(int argc, char* argv[]) {

	int _black_min = 20; 
	int _black_max = 50;
	cv::namedWindow("Color");
	cv::createTrackbar("Black", "Color", &_black_min, _black_max);


	//CvCapture* capture_mask = cvCreateFileCapture("Split/01BW.mp4");
	//CvCapture* capture_color = cvCreateFileCapture("Split/01color.mp4");
	//CvCapture* capture_mask = cvCreateFileCapture("Split/02BW.mp4");
	//CvCapture* capture_color = cvCreateFileCapture("Split/02color.mp4");
	//CvCapture* capture_mask = cvCreateFileCapture("Split/03BW.mp4");
	//CvCapture* capture_color = cvCreateFileCapture("Split/03color.mp4");
	CvCapture* capture_mask = cvCreateFileCapture("Split/04BW.mp4");
	CvCapture* capture_color = cvCreateFileCapture("Split/04color.mp4");

	// Create IplImage to point to each frame 
	IplImage* frame_color;
	IplImage* frame_mask;
	IplImage* frame_processed;

	// Loop until frame ended or ESC is pressed 
	frame_color = cvQueryFrame(capture_color);
	frame_mask = cvQueryFrame(capture_mask);
	frame_processed = cvCloneImage(frame_color);

	printf("Color:%dx%d\n", frame_color->width, frame_color->height);
	printf("Mask:%dx%d\n", frame_mask->width, frame_mask->height);

	double fps = cvGetCaptureProperty(capture_color, CV_CAP_PROP_FPS);
	CvSize size = cvSize(cvGetCaptureProperty(capture_color, CV_CAP_PROP_FRAME_WIDTH), cvGetCaptureProperty(capture_color, CV_CAP_PROP_FRAME_HEIGHT));
	CvVideoWriter* writer = cvCreateVideoWriter("Split/result04.avi", CV_FOURCC('D', 'I', 'V', 'X'), fps, size);

	CvScalar _green;
	_green.val[0] = 0;
	_green.val[1] = 255;
	_green.val[2] = 0;

	char c;
	int ac = 0;
	while (1) {
		c = cvWaitKey(33);
		//processKey(c);
		if (c == 27) break;

		//if (c == 'n') {
		//	frame_color = cvQueryFrame(capture_color); 
		//	frame_mask = cvQueryFrame(capture_mask);
		//	frame_processed = cvCloneImage(frame_color);
		//}
		//else {

			frame_color = cvQueryFrame(capture_color);
			frame_mask = cvQueryFrame(capture_mask);

			if (frame_color == NULL) {
				printf("NULL");
			}
			// exit loop if fram is null / movie end 
			if (!frame_color || !frame_mask)  break;

			frame_processed = cvCloneImage(frame_color);
		//}


		

		for( int i = 0; i < frame_mask->height; i++ ) {
			for( int j = 0; j < frame_mask->width-1; j++ ) {
				CvScalar _aux; 
				_aux=cvGet2D(frame_mask,i,j);

				if(_aux.val[0] <= _black_min && _aux.val[1] <= _black_min && _aux.val[2] <= _black_min){
					cvSet2D(frame_processed, i, j, _green);
				}
			}
		}

		//for (int i = 0; i < frame_color->height; i++) {
		//	for (int j = frame_mask->width-4; j < frame_mask->width; j++) {
		//		cvSet2D(frame_processed, i, j, _green);
		//	}
		//}

		//for (int i = 0; i < 2; i++) {
		//	for (int j = 0; j < frame_mask->width; j++) {
		//		cvSet2D(frame_processed, i, j, _green);
		//	}
		//}


		cvShowImage("Color", frame_color);
		cvShowImage("Mask", frame_mask);
		cvShowImage("Processed", frame_processed);

		cvWriteFrame(writer, frame_processed);

		cvReleaseImage(&frame_processed);
		//cvReleaseImage(&frame_mask);
	}

	cvReleaseVideoWriter(&writer);

	// destroy pointer to video 
	cvReleaseCapture(&capture_color);
	cvReleaseCapture(&capture_mask);
	// delete window 
	//cvDestroyWindow("Example2");


	return 0;
}
Example #15
// mask is of the same size as input
MaskShift* CreateMaskShift(IplImage* input, IplImage* mask, CvPoint shift, CvSize outputSize)
{
	IplImage* outputMask = cvCreateImage(outputSize, IPL_DEPTH_8U, 3);

	IplImage* outputTest = cvCreateImage(outputSize, IPL_DEPTH_8U, 3);
	IplImage* outputData = cvCreateImage(outputSize, IPL_DEPTH_8U, 3);
	for(int i = 0; i < outputMask->width; i++)
		for(int j = 0; j < outputMask->height; j++)
		{
			CvPoint inputPixel;
			inputPixel.x = shift.x + i;
			inputPixel.y = shift.y + j;
			if(!IsOutside(inputPixel, cvSize(input->width, input->height)))
			{
				cvSet2D(outputMask, j, i, cvGet2D(mask, inputPixel.y, inputPixel.x)); 
			}
			else
			{
				cvSet2D(outputMask, j, i, cvScalar(255));				 
			}
		}
	for(int i = 0; i < outputSize.width; i++)
		for(int j = 0; j < outputSize.height; j++)
		{
			cvSet2D(outputData,j,i, cvScalar(231,233,233));	
		}
	for(int i = 0; i < outputSize.width; i++)
		for(int j = 0; j < outputSize.height; j++)
		{
			
			if(IsMaskedPixel(i, j, outputMask))
			{
				// check neighbor
				CvPoint inputPixel;
				inputPixel.x = shift.x + i;
				inputPixel.y = shift.y + j;
				CvScalar value;
				CvSize inputSize = cvSize(input->width, input->height);
				
				if(!IsOutside(cvPoint(inputPixel.x, inputPixel.y), inputSize) && 
					!IsOutside(cvPoint(i,j),  outputSize))
				{
					value = cvGet2D(input, inputPixel.y, inputPixel.x);
					cvSet2D(outputData, j, i, value);
				}

				if(!IsOutside(cvPoint(inputPixel.x+1, inputPixel.y), inputSize) && 
					!IsOutside(cvPoint(i+1,j),  outputSize))
				{
					value = cvGet2D(input, inputPixel.y, inputPixel.x + 1);
					 
					cvSet2D(outputData, j, i+1, value);
				}

				if(!IsOutside(cvPoint(inputPixel.x - 1, inputPixel.y), inputSize) && 
					!IsOutside(cvPoint(i-1,j),  outputSize))
				{
					value = cvGet2D(input, inputPixel.y, inputPixel.x - 1);
					cvSet2D(outputData, j, i-1, value);
				}
 
				if(!IsOutside(cvPoint(inputPixel.x, inputPixel.y+1), inputSize) && 
					!IsOutside(cvPoint(i,j+1),  outputSize))
				{
					value = cvGet2D(input, inputPixel.y + 1, inputPixel.x);
					
					cvSet2D(outputData, j+1, i, value);
				}


				if(!IsOutside(cvPoint(inputPixel.x, inputPixel.y-1), inputSize) && 
					!IsOutside(cvPoint(i,j-1),  outputSize))
				{
					value = cvGet2D(input, inputPixel.y - 1, inputPixel.x);
					cvSet2D(outputData, j-1, i, value);
				}
			}
		}

		int j2 =21;
		int i2 = 5;	 
		CvScalar value2 = cvGet2D(outputData, j2+1, i2);
 
	// stay one pixel inside the border: the checks below read all four neighbours
	for(int i = 1; i < outputSize.width - 1; i++)
		for(int j = 1; j < outputSize.height - 1; j++)
		{
			if(IsMaskedPixel(i, j, outputMask))
			{
				// check neighbor
				CvScalar value;
				value = cvGet2D(outputData, j, i);
				if(value.val[0] == 231 && value.val[1] == 233 && value.val[2] == 233)
					printf("Test");
				value = cvGet2D(outputData, j+1, i);
				if(value.val[0] == 231 && value.val[1] == 233 && value.val[2] == 233)
					printf("Test");
				value = cvGet2D(outputData, j-1, i);
				if(value.val[0] == 231 && value.val[1] == 233 && value.val[2] == 233)
					printf("Test");
				value = cvGet2D(outputData, j, i+1);
				if(value.val[0] == 231 && value.val[1] == 233 && value.val[2] == 233)
					printf("Test");
				value = cvGet2D(outputData, j, i-1);
				if(value.val[0] == 231 && value.val[1] == 233 && value.val[2] == 233)
					printf("Test");
			}
		}
	//cvNamedWindow("Test");
	//while(1)
	//{
	//	cvShowImage("Test", outputData);
	//	cvWaitKey(100);
	//}
	MaskShift* maskShift = new MaskShift(outputMask, input);
	maskShift->SetMaskData(outputData);
	return maskShift;
}
Example #16
int image_analysis(const int argc, const char * argv[])
{
#define NUM 5

	char fname[4096];
	unsigned u, v, q, vec_per_class, vec_num, cl_ind, yes, res, vec_class[5], max_iter[NUM], * d, * test_d;
	double C[NUM], tau[NUM], epsilon[NUM], ** x, ** test_x;
	double (* K[NUM])(const double *, const double *, unsigned);
	int (* selector[NUM])(const fmll_svm *, const double **, const char *, const unsigned, int *, int *, const double, const double, const double, const double *, const double *, const double **);
	IplImage * src_teach, * src_test, * dst;
	CvSize size_teach, size_test;
	CvScalar pixel, pixel_white, pixel_red, pixel_green, pixel_blue, pixel_violet, pixel_black, pixel_yellow;
	fmll_svm_net * svm_net;

	/* ############################################################################ */

	if(argc != 3)
	{
		/* C90 has "problems" with maximum source-line length */

		printf("\nDemonstration of a neural network built from several support vector machines.\n\n");
		printf("Usage:\n\n");
		printf("ex_svm_net DIR IMAGE_1\n\n");
		printf("Where:\n\n");
		printf("\tDIR - path to a directory that must contain the following files:\n\n");
		printf("\t\tteach.png - image from which the training set will be built;\n");
		printf("\t\ttest.png - image whose pixels will be classified;\n\n");
		printf("\tIMAGE_1 - path of the file to which the resulting image will be saved ");
		printf("(white, red, green, blue, violet - correctly classified pixels; black - misclassified pixels; ");
		printf("yellow - unclassified pixels).\n\n");

		return -1;
	}

	/* ############################################################################ */

	memset(vec_class, 0, sizeof(unsigned) * 5);

	sprintf(fname, "%s/teach.png", argv[1]);
	src_teach = cvLoadImage(fname, CV_LOAD_IMAGE_COLOR);
	size_teach = cvGetSize(src_teach);

	sprintf(fname, "%s/test.png", argv[1]);
	src_test = cvLoadImage(fname, CV_LOAD_IMAGE_COLOR);
	size_test = cvGetSize(src_test);

	dst = cvCreateImage(size_test, IPL_DEPTH_8U, 3);

	pixel_white.val[0] = 255;
	pixel_white.val[1] = 255;
	pixel_white.val[2] = 255;
	pixel_white.val[3] = 0;

	pixel_red.val[0] = 0;
	pixel_red.val[1] = 0;
	pixel_red.val[2] = 255;
	pixel_red.val[3] = 0;

	pixel_green.val[0] = 0;
	pixel_green.val[1] = 255;
	pixel_green.val[2] = 0;
	pixel_green.val[3] = 0;

	pixel_black.val[0] = 0;
	pixel_black.val[1] = 0;
	pixel_black.val[2] = 0;
	pixel_black.val[3] = 0;

	pixel_blue.val[0] = 255;
	pixel_blue.val[1] = 0;
	pixel_blue.val[2] = 0;
	pixel_blue.val[3] = 0;
	
	pixel_violet.val[0] = 255;
	pixel_violet.val[1] = 0;
	pixel_violet.val[2] = 255;
	pixel_violet.val[3] = 0;

	pixel_yellow.val[0] = 0;
	pixel_yellow.val[1] = 255;
	pixel_yellow.val[2] = 255;
	pixel_yellow.val[3] = 0;

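	/* sample roughly one pixel in 2000 of the teaching image for each of the five classes */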
	vec_per_class = size_teach.height * size_teach.width / 2000;
	vec_num = vec_per_class * 5;

	x = (double **) fmll_alloc(sizeof(double), 2, vec_num, 3);
	d = fmll_alloc(sizeof(unsigned), 1, vec_num);

	test_x = (double **) fmll_alloc(sizeof(double), 2, size_test.height * size_test.width, 3);
	test_d = fmll_alloc(sizeof(unsigned), 1, size_test.height * size_test.width);

	for(v = 0, q = 0; v < size_teach.height; v++)
		for(u = 0; u < size_teach.width; u++)
		{
			pixel = cvGet2D(src_teach, v, u);

			if(pixel.val[0] == 0 && pixel.val[1] == 237 && pixel.val[2] == 95)
				cl_ind = 0;
			else if(pixel.val[0] == 10 && pixel.val[1] == 169 && pixel.val[2] == 203)
				cl_ind = 1;
			else if(pixel.val[0] == 0 && pixel.val[1] == 255 && pixel.val[2] == 255)
				cl_ind = 2;
			else if(pixel.val[0] == 255 && pixel.val[1] == 0 && pixel.val[2] == 12)
				cl_ind = 3;
			else
				cl_ind = 4;

			if(vec_class[cl_ind] < vec_per_class)
			{
				x[q][0] = pixel.val[0];
				x[q][1] = pixel.val[1];
				x[q][2] = pixel.val[2];
				d[q] = cl_ind;

				vec_class[cl_ind]++;
				q++;
			}
		}

	for(v = 0, q = 0; v < size_test.height; v++)
		for(u = 0; u < size_test.width; u++, q++)
		{
			pixel = cvGet2D(src_test, v, u);

			if(pixel.val[0] == 0 && pixel.val[1] == 237 && pixel.val[2] == 95)
				test_d[q] = 0;
			else if(pixel.val[0] == 10 && pixel.val[1] == 169 && pixel.val[2] == 203)
				test_d[q] = 1;
			else if(pixel.val[0] == 0 && pixel.val[1] == 255 && pixel.val[2] == 255)
				test_d[q] = 2;
			else if(pixel.val[0] == 255 && pixel.val[1] == 0 && pixel.val[2] == 12)
				test_d[q] = 3;
			else
				test_d[q] = 4;

			test_x[q][0] = pixel.val[0];
			test_x[q][1] = pixel.val[1];
			test_x[q][2] = pixel.val[2];
		}

	cvReleaseImage(& src_teach);
	cvReleaseImage(& src_test);

	/* ############################################################################ */
	
	for(u = 0; u < NUM; u++)
		K[u] = & fmll_kernel_radial;

	svm_net = fmll_svm_net_init(NUM, 3, K);

	/* ############################################################################ */

	for(u = 0; u < NUM; u++)
	{
		C[u] = 1;
		tau[u] = 1E-12;
		selector[u] = & fmll_svm_teach_smo_selector_fan_chen_lin;
		max_iter[u] = 10000;
		epsilon[u] = 1E-3;
	}

	fmll_svm_net_teach_smo(svm_net, (const double **) x, d, vec_num, C, tau, selector, max_iter, epsilon);

	/* ############################################################################ */

	yes = fmll_svm_net_test(svm_net, (const double **) test_x, test_d, size_test.width * size_test.height, NULL, NULL);
	
	printf("\nВерно классифицированных пикселей: %u из %u (%.7f %%)\n",
			yes, (size_test.width * size_test.height), (100.0 * yes) / (size_test.width * size_test.height));

	/* ############################################################################ */

	fmll_svm_net_save(svm_net, "svm_net");
	fmll_svm_net_destroy(svm_net);
	svm_net = fmll_svm_net_load("svm_net", K);

	for(v = 0, q = 0, yes = 0; v < size_test.height; v++)
		for(u = 0; u < size_test.width; u++, q++)
		{
			res = fmll_svm_net_run(svm_net, test_x[q], NULL);

			if(res >= 0)
			{
				if(res == test_d[q])
				{
					yes++;

					switch(res)
					{
						case 0:
						{
							cvSet2D(dst, v, u, pixel_white);
							break;
						}
						case 1:
						{
							cvSet2D(dst, v, u, pixel_red);
							break;
						}
						case 2:
						{
							cvSet2D(dst, v, u, pixel_green);
							break;
						}
						case 3:
						{
							cvSet2D(dst, v, u, pixel_blue);
							break;
						}
						case 4:
						{
							cvSet2D(dst, v, u, pixel_violet);
							break;
						}
					}
				}
				else
					cvSet2D(dst, v, u, pixel_black);
			}
			else if(res == -1)
				cvSet2D(dst, v, u, pixel_yellow);
			else
				cvSet2D(dst, v, u, pixel_black);
		}

	printf("Верно классифицированных пикселей: %u из %u (%.7f %%)\n",
			yes, (size_test.width * size_test.height), (100.0 * yes) / (size_test.width * size_test.height));

	/* ############################################################################ */
	
	fmll_free(x);
	fmll_free(d);
	fmll_free(test_x);
	fmll_free(test_d);
	fmll_svm_net_destroy(svm_net);

	cvSaveImage(argv[2], dst, NULL);
	cvReleaseImage(& dst);

	return 0;
}
Example #17
void CreateMask(IplImage* input, IplImage* mask, CvPoint shift, CvSize outputSize, IplImage* outputMask, IplImage* outputData)
{
	// check size
	if(outputMask->width != outputSize.width || outputMask->height != outputSize.height)
		return;
	if(outputData->width != outputSize.width || outputData->height != outputSize.height)
		return;
	
	 
	
	for(int i = 0; i < outputMask->width; i++)
		for(int j = 0; j < outputMask->height; j++)
		{
			CvPoint inputPixel;
			inputPixel.x = shift.x + i;
			inputPixel.y = shift.y + j;
			if(!IsOutside(inputPixel, cvSize(input->width, input->height)))
			{
				cvSet2D(outputMask, j, i, cvGet2D(mask, inputPixel.y, inputPixel.x)); 
			}
			else
			{
				cvSet2D(outputMask, j, i, cvScalar(255));				 
			}
		}
	for(int i = 0; i < outputSize.width; i++)
		for(int j = 0; j < outputSize.height; j++)
		{
			cvSet2D(outputData,j,i, cvScalar(231,233,233));	
		}
	for(int i = 0; i < outputSize.width; i++)
		for(int j = 0; j < outputSize.height; j++)
		{
			
			if(IsMaskedPixel(i, j, outputMask))
			{
				// check neighbor
				CvPoint inputPixel;
				inputPixel.x = shift.x + i;
				inputPixel.y = shift.y + j;
				CvScalar value;
				CvSize inputSize = cvSize(input->width, input->height);
				
				if(!IsOutside(cvPoint(inputPixel.x, inputPixel.y), inputSize) && 
					!IsOutside(cvPoint(i,j),  outputSize))
				{
					value = cvGet2D(input, inputPixel.y, inputPixel.x);
					cvSet2D(outputData, j, i, value);
				}

				if(!IsOutside(cvPoint(inputPixel.x+1, inputPixel.y), inputSize) && 
					!IsOutside(cvPoint(i+1,j),  outputSize))
				{
					value = cvGet2D(input, inputPixel.y, inputPixel.x + 1);
					 
					cvSet2D(outputData, j, i+1, value);
				}

				if(!IsOutside(cvPoint(inputPixel.x - 1, inputPixel.y), inputSize) && 
					!IsOutside(cvPoint(i-1,j),  outputSize))
				{
					value = cvGet2D(input, inputPixel.y, inputPixel.x - 1);
					cvSet2D(outputData, j, i-1, value);
				}
 
				if(!IsOutside(cvPoint(inputPixel.x, inputPixel.y+1), inputSize) && 
					!IsOutside(cvPoint(i,j+1),  outputSize))
				{
					value = cvGet2D(input, inputPixel.y + 1, inputPixel.x);
					
					cvSet2D(outputData, j+1, i, value);
				}


				if(!IsOutside(cvPoint(inputPixel.x, inputPixel.y-1), inputSize) && 
					!IsOutside(cvPoint(i,j-1),  outputSize))
				{
					value = cvGet2D(input, inputPixel.y - 1, inputPixel.x);
					cvSet2D(outputData, j-1, i, value);
				}
			}
		}		
	
		// stay one pixel inside the border: the checks below read all four neighbours
		for(int i = 1; i < outputSize.width - 1; i++)
		for(int j = 1; j < outputSize.height - 1; j++)
		{
			if(IsMaskedPixel(i, j, outputMask))
			{
				// check neighbor
				CvScalar value;
				value = cvGet2D(outputData, j, i);
				if(value.val[0] == 231 && value.val[1] == 233 && value.val[2] == 233)
					printf("Test");
				value = cvGet2D(outputData, j+1, i);
				if(value.val[0] == 231 && value.val[1] == 233 && value.val[2] == 233)
					printf("Test");
				value = cvGet2D(outputData, j-1, i);
				if(value.val[0] == 231 && value.val[1] == 233 && value.val[2] == 233)
					printf("Test");
				value = cvGet2D(outputData, j, i+1);
				if(value.val[0] == 231 && value.val[1] == 233 && value.val[2] == 233)
					printf("Test");
				value = cvGet2D(outputData, j, i-1);
				if(value.val[0] == 231 && value.val[1] == 233 && value.val[2] == 233)
					printf("Test");
			}
		}

}
Example #18
void detectAndDisplay(IplImage *frame)
{
    IplImage *frame_gray;
    int ForCount1, ForCount2;
    ForCount1 = ForCount2 = 0;

    memset(pointXY, 0, sizeof(char)*column*row);

    frame_gray = cvCreateImage( cvGetSize(frame),IPL_DEPTH_8U,1);
    //frame_gray = cvCreateImage( cvSize(column, row),IPL_DEPTH_8U,1);
    dst = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
    //dst = cvCreateImage(cvSize(column,row),IPL_DEPTH_8U,1);
   
#ifdef showXY
    showSumX = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
    showSumY = cvCreateImage(cvSize(row,column),IPL_DEPTH_8U,1);
#endif

    cvCvtColor(frame, frame_gray, CV_BGR2GRAY);
    //cvCanny(frame_gray, dst, 40, 40*3, 3);
    cvThreshold(frame_gray, dst, Threshold, 255, CV_THRESH_BINARY);
 
    for(ForCount1 = 0; ForCount1 < column; ForCount1++)
    {
        for(ForCount2 = 0; ForCount2 < row; ForCount2++)
        {
            CvScalar s = cvGet2D(dst,ForCount2,ForCount1); 
            //char s = ((uchar *)(dst->imageData + ForCount1*dst->widthStep))[ForCount2];
#ifdef Detail
            printf("%3d %3d %f\n",ForCount1,ForCount2, s.val[0]);
#endif
            /*if( s.val[0] <= Threshold)
            {
                pointXY[ForCount1][ForCount2] = 1; 
            }*/
            if(s.val[0] <= Threshold)
            {
                pointXY[ForCount1][ForCount2] = 0;
            }
            else
            {
                pointXY[ForCount1][ForCount2] = 1;
            }
        }
    }

    Integral(row);
    Integral(column);
    Error1();
    Error2();

    cvShowImage("Webcam",dst);
    cvShowImage("Webcam1",frame_gray);

#ifdef Detail
    for(ForCount1 = 0; ForCount1 < column; ForCount1++)
    {
        printf("x[%3d]:%d\n", ForCount1, SumX[ForCount1]);
    }
    printf("\n");
    for(ForCount1 = 0; ForCount1 < row; ForCount1++)
    {
        printf("y[%3d]:%d\n", ForCount1, SumY[ForCount1]);
    }
    printf("\n");
#endif

#ifdef showXY
    for(ForCount1 = 0; ForCount1 < column; ForCount1++)
    {
        for(ForCount2 = 0; ForCount2 < (int)SumX[ForCount1]; ForCount2++)
        {
            CvScalar s = cvGet2D(showSumX,ForCount2,ForCount1); 
            s.val[0] = 255;
            cvSet2D(showSumX, ForCount2, ForCount1, s);
        }
    }
    cvShowImage("SumX", showSumX);

    for(ForCount1 = 0; ForCount1 < row; ForCount1++)
    {
        for(ForCount2 = 0; ForCount2 < (int)SumY[ForCount1]; ForCount2++)
        {
            CvScalar s = cvGet2D(showSumY,ForCount2,ForCount1); 
            s.val[0] = 255;
            cvSet2D(showSumY, ForCount2, ForCount1, s);
   
        }
    }

#endif

    //cvShowImage("SumY", showSumY);
    cvReleaseImage( &dst );
    cvReleaseImage( &frame_gray );
#ifdef showXY
    cvReleaseImage( &showSumX );
    cvReleaseImage( &showSumY );
#endif
}
Example #19
CvScalar MaskShift::GetPixelValue(int x, int y, IplImage* image)
{
	return cvGet2D(image, y + _shift.y, x + _shift.x);
}
Example #20
void Error1()
{
    int ForCount1 = 0;
    int ForCount2 = 0;
    
    memset(SumX1, 0, sizeof(short)*column);
/*    for(ForCount1 = 0; ForCount1 < column; ForCount1++)
    {
        for(ForCount2 = YUpperBoundPos -1; ForCount2 >= (YUpperBoundPos/2); ForCount2--)
        {
            SumX1[ForCount1] = SumX1[ForCount1] + pointXY[ForCount1][ForCount1];
        }
        if(ForCount1 > 3 || SumX1[ForCount1] > 4 && SumX1[ForCount1-1] > 4 && SumX1[ForCount1-2] > 4 && SumX1[ForCount1-3] >4 )
        {
            for(ForCount2 = YUpperBoundPos -1; ForCount2 >= (YUpperBoundPos/2); ForCount2--)
            {
                if(pointXY[ForCount1][ForCount2] == 1)
                {
                    CvScalar s = cvGet2D(dst,ForCount2,ForCount1); 
                    s.val[0] = 150;
                    cvSet2D(dst, ForCount2, ForCount1, s);

                }
                if(pointXY[ForCount1-1][ForCount2] == 1)
                {
                    CvScalar s = cvGet2D(dst,ForCount2,ForCount1-1); 
                    s.val[0] = 150;
                    cvSet2D(dst, ForCount2, ForCount1-1, s);

                }
                if(pointXY[ForCount1-2][ForCount2] == 1)
                {
                    CvScalar s = cvGet2D(dst,ForCount2,ForCount1-2); 
                    s.val[0] = 150;
                    cvSet2D(dst, ForCount2, ForCount1-2, s);

                }
                if(pointXY[ForCount1-3][ForCount2] == 1)
                {
                    CvScalar s = cvGet2D(dst,ForCount2,ForCount1-3); 
                    s.val[0] = 150;
                    cvSet2D(dst, ForCount2, ForCount1-3, s);

                }

            }
            printf("Error1\n");
            break;
        } 
    }
*/
    for(ForCount1 = YUpperBoundPos-1; ForCount1 >= (YUpperBoundPos/2); ForCount1--)
    {
  
        if(SumY[ForCount1] > Error1YThreshold && SumY[ForCount1-1] > Error1YThreshold && SumY[ForCount1-2] > Error1YThreshold )
        {
            for(ForCount2 = 0; ForCount2 < column; ForCount2++)
            {
                if(pointXY[ForCount2][ForCount1] == 1)
                {
                    CvScalar s = cvGet2D(dst,ForCount1,ForCount2); 
                    s.val[0] = 150;
                    cvSet2D(dst, ForCount1, ForCount2, s);
                    cvSet2D(dst, ForCount1-1, ForCount2, s);
                    cvSet2D(dst, ForCount1-2, ForCount2, s);
                }
            }
            printf("Error1\n\n");
            break;
        }
    }
}
Example #21
0
bool CvNeuralGas::train( CvScalar* _input ) {

    CV_FUNCNAME( "CvNeuralGas::train" );
    __BEGIN__;

    //if( input != NULL )
    //    delete input;

    if( _input != NULL ) {
        input = _input;
    } else {
        // pick a random sample from the distribution image
        int x = rng.next() % (distribution->width - 1);
        int y = rng.next() % (distribution->height - 1);

        // cvGet2D returns a CvScalar by value; taking the address of that
        // temporary is invalid, so keep the sample in storage that outlives this block.
        static CvScalar sampled;
        sampled = cvGet2D( distribution, y, x );
        input = &sampled;
    }

    // Calculate the distance of each node's reference vector from the
    // projected input vector.
    double temp = 0.0;
    double val = 0.0;
    for( unsigned long int i=0; i<total_nodes; i++ ) {
        CvGasNode* curr = nodes->at( i );
        curr->distance = 0.0;

        CvScalar* ref_vector = &(curr->ref_vector);
        for( int x=0; x<4; x++ ) {
            val = input->val[x] - ref_vector->val[x];
            temp += pow( val, 2.0 );
        }

        curr->distance = sqrt( temp );

        temp = 0.0;
        val = 0.0;
    }

    //Sort the nodes based on their distance.
    std::sort( nodes->begin(), nodes->end(), Compare);

    //Fetch the bmu/smu/wmu.
    bmu = nodes->at( 0 );
    smu = nodes->at( 1 );
    wmu = nodes->at( total_nodes - 1 );

    // Adapt the nodes.
    double epsilon_t = epsilon0 * pow( ( epsilonT / epsilon0 ), (float)iteration/max_iterations );
    double sqr_sigma = lambda0 * pow( ( lambdaT / lambda0 ), (float)iteration/max_iterations );

    for( unsigned long int i=0; i<total_nodes; i++ ) {
        CvGasNode* curr = nodes->at( i );
        curr->rank = -i;

        double h = exp( ((double)curr->rank) / sqr_sigma );

        CvScalar* ref_vector = &(curr->ref_vector);
        CvScalar delta;

        for(int x=0;x<4;x++){
            delta.val[x] = (input->val[x] - ref_vector->val[x]) * h * epsilon_t;
            ref_vector->val[x] += delta.val[x];
        }
    }

    iteration++;

    __END__;
    return true;
}
void compresie_decompresie_cu_DCT( IplImage* image )
{
    int		w, h;
    int		i, j;
    int     k, l;
    double	pi = 3.1415926;
    
    CvScalar   pixel;
    CvScalar   valoare;
    IplImage   *imagine_rezultat;
    IplImage   *imagine_diferenta;
    IplImage   *cosinus;

	int	    N, nivel;
	double  max = 0, min = 10000;
	double  value;
	CvMat	*C;	//the discrete cosine transform matrix
	CvMat	*U;	//the image matrix in the original (spatial) domain
	CvMat	*V;	//the image matrix in the transform domain
	CvMat   *AUX; //auxiliary matrix for the partial result

    double m, M;
    double prag;
    int    nr;  //number of coefficients zeroed out in the transform domain

    w = image->width;
    h = image->height;

    if( w == h )
    {
        imagine_rezultat = cvCreateImage( cvSize( w, h ), IPL_DEPTH_8U, 1 );    
        
        N = w;
        C = cvCreateMat( N, N, CV_32FC1 );
        U = cvCreateMat( N, N, CV_32FC1 );
        V = cvCreateMat( N, N, CV_32FC1 );
        AUX = cvCreateMat( N, N, CV_32FC1 );
        
        //build the matrix C of the discrete cosine transform
        for( i = 0; i < N; i++ )
             cvmSet( C, 0, i, 1. / sqrt( (float)N ) );
        
        for( i = 1; i < N; i++ )
             for( j = 0; j < N; j++ )
             {
                  value = sqrt( 2./N ) * cos( pi * ( 2*j + 1 ) * i / ( 2*N ) );
                  cvmSet( C, i, j, value );
                  if( value > max )
                      max = value;
             }
             
        //fill matrix U with the image pixel values
        for( i = 0; i < N; i++ )
             for( j = 0; j < N; j++ )
             {
                  pixel = cvGet2D( image, i, j );
                  cvmSet( U, i, j, pixel.val[ 0 ] );
             }
    
        //V = C*U*Ct
        //first compute AUX = C * U
        for( i = 0; i < N; i++ )
             for( j = 0; j < N; j++ )
             {
                  value = 0;	
                  for( k = 0; k < N; k++ )
                       value += cvmGet( C, i, k ) * cvmGet( U, k, j );
                  cvmSet( AUX, i, j, value );
             }
        
        //then V = AUX * Ct
        max = 0;
        for( i = 0; i < N; i++ )
             for( j = 0; j < N; j++ )
             {
                  value = 0;	
                  for( k = 0; k < N; k++ )
                       value += cvmGet( AUX, i, k ) * cvmGet( C, j, k );
                  cvmSet( V, i, j, value );

                  if( value > max )
                      max = value;
                  else
                      if( value < min )
                          min = value;    
             }

// /*
        //zero out the low-energy coefficients
        prag = 800;
        nr = 0;
        for( i = 0; i < N; i++ )
             for( j = 0; j < N; j++ )
             {
                  value = fabs( cvmGet( V, i, j ) );
                  if( value < prag )
                  {
                      cvmSet( V, i, j, 0 );
                      nr++;
                  }
             }
        printf( "Procentul de valori retinute in spatiul transformatei = %5.2lf%%\n", 100.*(N*N - nr) / (N*N) );
        if( nr != 0 )
            printf( "Factorul de compresie obtinut = %5.2lf\n", 1. * (N * N) / (N*N - nr) );        
// */
        
        // IMAGE DECOMPRESSION
            
        //U = Ct * V * C
        //AUX = Ct * V
        for( i = 0; i < N; i++ )
             for( j = 0; j < N; j++ )
             {
                  value = 0;
                  for( k = 0; k < N; k++ )
                       value += cvmGet( C, k, i ) * cvmGet( V, k, j );
                  cvmSet( AUX, i, j, value );
             }
           	
        //then U = AUX * C
        for( i = 0; i < N; i++ )
             for( j = 0; j < N; j++ )
             {
                  value = 0;
                  for( k = 0; k < N; k++ )
                       value += cvmGet( AUX, i, k ) * cvmGet( C, k, j );
                  cvmSet( U, i, j, value );
                  pixel.val[ 0 ] = value;
                  cvSet2D( imagine_rezultat, i, j, pixel );
             }	

        cvNamedWindow( "Imaginea dupa decompresie", 1 );
        cvShowImage( "Imaginea dupa decompresie", imagine_rezultat );

        //compute the pseudo difference image (noise caused by the lossy compression)
        imagine_diferenta = cvCreateImage( cvSize( w, h ), IPL_DEPTH_8U, 1 );  
        
        for( i = 0; i < N; i++ )
             for( j = 0; j < N; j++ )
             {
                  pixel = cvGet2D( image, i, j );
                  value = (int)( fabs( pixel.val[ 0 ] - cvmGet( U, i, j ) ) );
                  valoare.val[ 0 ] = value;
                  cvSet2D( imagine_diferenta, i, j, valoare );
             }	        
        
        cvNamedWindow( "Pseudo-imaginea diferenta", 1 );
        cvShowImage( "Pseudo-imaginea diferenta", imagine_diferenta );        
        
        cvWaitKey(0);
        cvReleaseImage( &imagine_rezultat );
        cvReleaseImage( &imagine_diferenta );
        cvDestroyWindow( "Imaginea dupa decompresie" );
        cvDestroyWindow( "Pseudo-imaginea diferenta" );
        
        cvReleaseMat( &C );
        cvReleaseMat( &U );        
        cvReleaseMat( &V );
        cvReleaseMat( &AUX );
     }
     else
        printf( "Imaginea de intrare nu este patrata! (w != h)\n" );
}
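For reference (not part of the original example): the same forward and inverse transform can be obtained with OpenCV's built-in cvDCT instead of the explicit C*U*Ct products, since the hand-built C above is the orthonormal DCT matrix. A minimal sketch, assuming a square, single-channel, 8-bit input; the function name and the threshold argument are illustrative only:

void dct_compress_sketch( IplImage* image, double prag )
{
    int N = image->width;                      // assumes width == height, as checked above
    CvMat *U = cvCreateMat( N, N, CV_32FC1 );
    CvMat *V = cvCreateMat( N, N, CV_32FC1 );

    cvConvert( image, U );                     // 8-bit pixels -> 32-bit float matrix
    cvDCT( U, V, CV_DXT_FORWARD );             // V = DCT(U)

    // zero out the low-energy coefficients, as in the example above
    for( int i = 0; i < N; i++ )
        for( int j = 0; j < N; j++ )
            if( fabs( cvmGet( V, i, j ) ) < prag )
                cvmSet( V, i, j, 0 );

    cvDCT( V, U, CV_DXT_INVERSE );             // reconstruct from the kept coefficients
    cvConvert( U, image );                     // write back, saturated to 8 bits

    cvReleaseMat( &U );
    cvReleaseMat( &V );
}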
Example #23
0
void adjust_bodybbox_w_clusters(CvMat* mask, IplImage* cluster, int numclusters, CvRect facebox )
{
  double a=1.15;  //extra growing of body box region
  double A=1.00;  // body bbox is lower than face bbox (neck)
  double b=1+a;

  int bodyx0 = ((facebox.x-facebox.width*   a) < 0 ) ? 0 : (facebox.x-facebox.width* a);
  int bodyy0 = ((facebox.y+facebox.height * A) > cluster->height) ? cluster->height : (facebox.y+facebox.height*A );
  int bodyx1 = ((facebox.x+facebox.width*   b) > cluster->width) ? cluster->width : (facebox.x+facebox.width*b);
  int bodyy1 = (cluster->height);


  int x,y, i=0;
  int *accu;
  int eqclass_1st;      // equivalence class chosen as most populated
  int tmp_eqclass;      // temp, to hold the equivalence class associated to a pixel
  int eqclass_2nd;      // equivalence class chosen as 2nd most populated

  accu = (int*)malloc( numclusters*sizeof(int));
  bzero( accu, numclusters*sizeof(int));
  eqclass_1st = 0;
  eqclass_2nd = numclusters-1; // just initialised to sth != eqclass_1st

  // 1st: get to know the amount of pixels per equivalence class ("cluster")
  // but not blindly: only those in the FG mask already
  for( x = 0; x < cluster->width; x++ ){
    for( y = 0; y < cluster->height; y++ ){      
      // filter the equ_classes using the mask
      if( ( x >= bodyx0)  && ( x <= bodyx1) && ( y >= bodyy0)  && ( y <= bodyy1)){
        tmp_eqclass = (int) ( round(cvGet2D( cluster, y, x).val[1]*numclusters/255.0) -1);
        accu[ tmp_eqclass ] ++;
      }
    }
  }

  // 2nd: get the most populated and the 2nd most populated cluster
  for( i = 0; i< numclusters; i++ ){
    eqclass_1st = ( accu[i] > accu[eqclass_1st] ) ? i : eqclass_1st;
    eqclass_2nd = ( accu[i] > accu[eqclass_2nd] ) ? ((accu[i] < accu[eqclass_1st]) ? i : eqclass_2nd ): eqclass_2nd;
    printf(" %.8d ", accu[i]);
  }
  printf(" (eqclass_1st %d  2nd %d) \n", eqclass_1st, eqclass_2nd);


  // 3rd: Using the pixels inside of a seed of the body bbox, calculated from the face box,
  // we calculate the (minx,miny)-(maxx,maxy) bounding rectangle due to the largest cluster
  int minx=10000, miny=10000, maxx=0, maxy=0;
  for( x = 0; x < cluster->width; x++ ){
    for( y = 0; y < cluster->height; y++ ){      

      if(!( ( x >= bodyx0)  && ( x <= bodyx1) && ( y >= bodyy0)  && ( y <= bodyy1) )){
        cvSet2D(cluster, y, x, cvScalar(0, 0, 0 ) );
        continue;
      }

      tmp_eqclass = (int) ( round(cvGet2D( cluster, y, x).val[1]*numclusters/255.0) -1);

      if(tmp_eqclass == eqclass_1st){
        cvSet2D(cluster, y, x, cvScalar(255, 0, 0 ) ); // for display purposes
        maxx = ( maxx > x ) ? maxx : x;
        maxy = ( maxy > y ) ? maxy : y;
        minx = ( minx < x ) ? minx : x;
        miny = ( miny < y ) ? miny : y;
      }
      else if (tmp_eqclass == eqclass_2nd) {
        cvSet2D(cluster, y, x, cvScalar(100, 0, 0 ) ); // for display purposes
      }
      else{
        cvSet2D(cluster, y, x, cvScalar(10, 0, 0 ) );  // for display purposes
      }
    }
  }

  cvRectangle(cluster, cvPoint(minx, miny), cvPoint(maxx, maxy), cvScalar(255, 0, 255), 1);
  // Last: compose in the mask a body-box based on the largest cluster bbox
  // the rectangle is needed otherwise the largest cluster has loads of holes
  cvRectangle(mask, cvPoint(minx, miny), cvPoint(maxx, cluster->height), cvScalar(GC_PR_FGD, 0, 0), CV_FILLED);

  free( accu );


}
void CTWithWater::imageAndDepthToWorld(double u, double v, double d,
                                       double* x, double* y, double* z,
                                       bool undistort)
{
    double xx, yy, t;
    CvMat* r = cvCreateMat(3, 1, CV_32FC1);

    if(undistort)
    {
        CvMat* I = cvCreateMat(1, 1, CV_32FC2);
        CvMat* Io = cvCreateMat(1, 1, CV_32FC2);
        cvSet2D(I, 0, 0, cvScalar(u,v));
        
        cvUndistortPoints(I, Io, m_CameraMatrix, m_DistCoeffs, NULL, m_CameraMatrixNorm);
        CvScalar s = cvGet2D(Io, 0, 0);
        
        xx = s.val[0];//cvGetReal1D(Io, 0);
        yy = s.val[1];//cvGetReal1D(Io, 1);
        
        cvReleaseMat(&I);
        cvReleaseMat(&Io);            
    }
    else
    {
        xx = u;
        yy = v;
    }

    xx = (xx - cvGetReal2D(m_CameraMatrixNorm, 0, 2))/cvGetReal2D(m_CameraMatrixNorm, 0, 0);
    yy = (yy - cvGetReal2D(m_CameraMatrixNorm, 1, 2))/cvGetReal2D(m_CameraMatrixNorm, 1, 1);

    cvSetReal1D(r, 0, xx); 
    cvSetReal1D(r, 1, yy);
    cvSetReal1D(r, 2, 1.0);

    /* Rt_(3,:)*r = sum of third column of R times elements of r */
    t = xx*cvGetReal2D(m_R, 0, 2) + yy*cvGetReal2D(m_R, 1, 2) + cvGetReal2D(m_R, 2, 2);
    if(t == 0)
    {
        t = 1.0;
    }

    if(d <= 0)
    {
        /* d<= 0 => above water surface */
        t = (-m_dCameraHeightAboveWater-d)/t;

        /* r = t*R'*r + C */
        cvGEMM(m_R, r, t, m_CameraWorld, 1.0, r, CV_GEMM_A_T);
    }
    else
    {
        /* d > 0 => below water surface */

        t = -m_dCameraHeightAboveWater/t;
        
        /* S = t*R'*r */
        cvGEMM(m_R, r, t, NULL, 0, m_S, CV_GEMM_A_T);

        double Sx = cvGetReal1D(m_S, 0);
        double Sy = cvGetReal1D(m_S, 1);    
        double phi = atan2(Sy, Sx);
        double rS = sqrt(Sx*Sx + Sy*Sy);

        double rP = calculateRpFromRs(rS, d, m_dCameraHeightAboveWater);
        cvSetReal1D(r, 0, rP*cos(phi));
        cvSetReal1D(r, 1, rP*sin(phi));
        cvSetReal1D(r, 2, -m_dCameraHeightAboveWater-d);

        cvAdd(r, m_CameraWorld, r);
    }

    *x = cvGetReal1D(r, 0);
    *y = cvGetReal1D(r, 1);
    *z = cvGetReal1D(r, 2);    
                           
    cvReleaseMat(&r);

}
Example #25
0
// callback function to extract the pixel coordinate value from an image
unsigned getimgdata_fn(void *p, unsigned x, unsigned y)
{
	CvScalar s = cvGet2D((IplImage*)p, y, x);
	return (unsigned)s.val[0];
}
Example #26
0
bool GrayImageSegmentByKMeans2( const IplImage* pImg, IplImage* pResult, int nClusters, int sortFlag )
{
        assert( pImg != NULL && pImg->nChannels == 1 );

        // create the sample matrix; CV_32FC1 means one 32-bit float channel (grayscale image)
        CvMat* samples = cvCreateMat( (pImg->width) * (pImg->height), 1, CV_32FC1 );

        // create the cluster label matrix; CV_32SC1 means one 32-bit integer channel
        CvMat* clusters = cvCreateMat( (pImg->width) * (pImg->height), 1, CV_32SC1 );

        // create the cluster centers matrix
        CvMat* centers = cvCreateMat( nClusters, 1, CV_32FC1 );

        // copy the original image into the sample matrix
        {
               int k = 0;
               CvScalar s;
               for( int i = 0; i < pImg->width; i++ )
               {
                      for( int j = 0; j < pImg->height; j++ )
                      {
                             s.val[0] = (float)cvGet2D( pImg, j, i ).val[0];
                             cvSet2D( samples, k++, 0, s );
                      }
               }
        }

        // run the clustering: at most 50 iterations, termination epsilon 1.0
        cvKMeans2( samples, nClusters, clusters, cvTermCriteria( CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 50, 1.0 ), 1, 0, 0, centers );

        // output directly, without sorting the clusters
        if( sortFlag == 0 )
        {
               int k = 0;
               int val = 0;
               float step = 255 / ((float)nClusters - 1);
               CvScalar s;

               for( int i = 0; i < pImg->width; i++ )
               {
                      for( int j = 0; j < pImg->height; j++ )
                      {
                             val = (int)clusters->data.i[k++];
                             s.val[0] = 255 - val * step;   // map each cluster to a distinct gray level
                             cvSet2D( pResult, j, i, s );   // assign the value to each pixel
                      }
               }
        }

        // release the temporary matrices and make sure every path returns a value
        cvReleaseMat( &samples );
        cvReleaseMat( &clusters );
        cvReleaseMat( &centers );
        return true;
}
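A side note (not part of the original example): because each sample is just one pixel stored as a float, the sample matrix can also be filled in a single call by viewing it as an image-shaped matrix. The cluster labels then follow row-major pixel order rather than the column-major order used above, which is harmless as long as they are read back in the same order. A minimal sketch, assuming the same 8-bit single-channel pImg:

CvMat* samples = cvCreateMat( (pImg->width) * (pImg->height), 1, CV_32FC1 );
CvMat view;
cvReshape( samples, &view, 0, pImg->height );   // view the column vector as a height x width matrix
cvConvert( pImg, &view );                       // copy pixels, uchar -> float, no per-pixel cvGet2D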
Example #27
0
int opticaltri( CvMat * &clean_texture, int verts )
{
	char * im1fname = "conhull-dirty-thresh.jpg";
	char * im2fname = "conhull-clean-thresh.jpg";

	int count = MAX_COUNT;
	char * status;
	
	CvPoint2D32f * source_points;
	CvPoint2D32f * dest_points;
	CvPoint2D32f * delaunay_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));

	// count = opticalflow( im1fname, im2fname, source_points, dest_points, status ); 
	count = findsiftpoints( "conhull-dirty.jpg", "conhull-clean.jpg", source_points, dest_points, status ); 

	IplImage * image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);

	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSubdiv2D * delaunay = cvCreateSubdivDelaunay2D( cvRect(0,0,image1->width,image1->height), storage);

	IplImage * image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);

	cvSet( image1, cvScalarAll(255) );

	std::map<CvPoint, CvPoint> point_lookup_map;
	std::vector<std::pair<CvPoint, CvPoint> > point_lookup;

	int num_matches = 0;
	int num_out_matches = 0;
	int max_dist = 50;
	int offset = 200;	

	// put corners in the point lookup as going to themselves
	point_lookup_map[cvPoint(0,0)] = cvPoint(0,0);
	point_lookup_map[cvPoint(0,image1->height-1)] = cvPoint(0,image1->height-1);
	point_lookup_map[cvPoint(image1->width-1,0)] = cvPoint(image1->width-1,0);
	point_lookup_map[cvPoint(image1->width-1,image1->height-1)] = cvPoint(image1->width-1,image1->height-1);

	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,0), cvPoint(0,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,image1->height-1), cvPoint(0,image1->height-1)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,0), cvPoint(image1->width-1,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,image1->height-1), cvPoint(image1->width-1,image1->height-1)));

	printf("Inserting corners...");
	// put corners in the Delaunay subdivision
	for(unsigned int i = 0; i < point_lookup.size(); i++) {
		cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(point_lookup[i].first) );
	}
	printf("done.\n");

	CvSubdiv2DEdge proxy_edge;
	for(int i = 0; i < count; i++) {
		if(status[i]) {
			CvPoint source = cvPointFrom32f(source_points[i]);
			CvPoint dest = cvPointFrom32f(dest_points[i]);
	
			if((((int)fabs((double)(source.x - dest.x))) > max_dist) ||
				 (((int)fabs((double)(source.y - dest.y))) > max_dist)) {	
				num_out_matches++;
			}
			else if((dest.x >= 0) && (dest.y >= 0) && (dest.x < (image1->width)) && (dest.y < (image1->height))) {
				if(point_lookup_map.find(source) == point_lookup_map.end()) {
					num_matches++;
				
					point_lookup_map[source] = dest;
					point_lookup.push_back(std::pair<CvPoint,CvPoint>(source,dest));
					// delaunay_points[i] = 
					(cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(source) ))->pt;
					cvSetImageROI( image1, cvRect(source.x-8,source.y-8,8*2,8*2) );
					cvResetImageROI( image2 );
					cvGetRectSubPix( image2, image1, dest_points[i] );
				}
				/*
				cvSet2D( image1, source.y, source.x, cvGet2D( image2, dest.y, dest.x ) );
				cvSet2D( image1, source.y, source.x+1, cvGet2D( image2, dest.y, dest.x+1 ) );
				cvSet2D( image1, source.y, source.x-1, cvGet2D( image2, dest.y, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x, cvGet2D( image2, dest.y+1, dest.x ) );
				cvSet2D( image1, source.y-1, source.x, cvGet2D( image2, dest.y-1, dest.x ) );
				cvSet2D( image1, source.y+1, source.x+1, cvGet2D( image2, dest.y+1, dest.x+1 ) );
				cvSet2D( image1, source.y-1, source.x-1, cvGet2D( image2, dest.y-1, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x-1, cvGet2D( image2, dest.y+1, dest.x-1 ) );
				cvSet2D( image1, source.y-1, source.x+1, cvGet2D( image2, dest.y-1, dest.x+1 ) );
				*/

				// cvCircle( image1, source, 4, CV_RGB(255,0,0), 2, CV_AA );
				// cvCircle( image2, dest, 4, CV_RGB(255,0,0), 2, CV_AA );
			}

			/*
			cvSetImageROI( image1, cvRect(source.x-offset,source.y-offset,offset*2,offset*2) );
			cvSetImageROI( image2, cvRect(dest.x-offset,dest.y-offset,offset*2,offset*2) );
			cvNamedWindow("image1",0);
			cvNamedWindow("image2",0);
			cvShowImage("image1",image1);
			cvShowImage("image2",image2);
			printf("%d,%d -> %d,%d\n",source.x,source.y,dest.x,dest.y);
			cvWaitKey(0);
			cvDestroyAllWindows();
			*/
		}
	}
	printf("%d %d\n",num_matches,num_out_matches);
	printf("%d lookups\n",point_lookup_map.size());

	cvResetImageROI( image1 );

	cvSaveImage("sparse.jpg", image1);

	cvReleaseImage(&image1);
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	cvSet( image1, cvScalarAll(255) );
	printf("Warping image...");

	CvSeqReader  reader;
	int total = delaunay->edges->total;
	int elem_size = delaunay->edges->elem_size;


	std::vector<Triangle> trivec;
	std::vector<CvMat *> baryinvvec;

	for( int i = 0; i < total*2; i++ ) {
		if((i == 0) || (i == total)) {
			cvStartReadSeq( (CvSeq*)(delaunay->edges), &reader, 0 );
		}
		CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);

		if( CV_IS_SET_ELEM( edge ))	{
			CvSubdiv2DEdge curedge = (CvSubdiv2DEdge)edge;
			CvSubdiv2DEdge t = curedge;
			Triangle temptri;
			int count = 0;
			
			// construct a triangle from this edge
			do {
				CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
				if(count < 3) {
					pt->pt.x = pt->pt.x >= image1->width ? image1->width-1 : pt->pt.x;
					pt->pt.y = pt->pt.y >= image1->height ? image1->height-1 : pt->pt.y;
					pt->pt.x = pt->pt.x < 0 ? 0 : pt->pt.x;
					pt->pt.y = pt->pt.y < 0 ? 0 : pt->pt.y;

					temptri.points[count] = cvPointFrom32f( pt->pt );
				}
				else {
					printf("More than 3 edges\n");
				}
				count++;
				if(i < total)
					t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
				else
					t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_RIGHT );
			} while( t != curedge );
			
			// check that triangle is not already in
			if( std::find(trivec.begin(), trivec.end(), temptri) == trivec.end() ) {
				// push triangle in and draw
				trivec.push_back(temptri);
				cvLine( image1, temptri.points[0], temptri.points[1], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[1], temptri.points[2], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[2], temptri.points[0], CV_RGB(255,0,0), 1, CV_AA, 0 );

				// compute barycentric computation vector for this triangle
				CvMat * barycen = cvCreateMat( 3, 3, CV_32FC1 );
				CvMat * baryceninv = cvCreateMat( 3, 3, CV_32FC1 );

				barycen->data.fl[3*0+0] = temptri.points[0].x;
				barycen->data.fl[3*0+1] = temptri.points[1].x;
				barycen->data.fl[3*0+2] = temptri.points[2].x;
				barycen->data.fl[3*1+0] = temptri.points[0].y;
				barycen->data.fl[3*1+1] = temptri.points[1].y;
				barycen->data.fl[3*1+2] = temptri.points[2].y;
				barycen->data.fl[3*2+0] = 1;
				barycen->data.fl[3*2+1] = 1;
				barycen->data.fl[3*2+2] = 1;

				cvInvert( barycen, baryceninv, CV_LU );
				baryinvvec.push_back(baryceninv);

				cvReleaseMat( &barycen );
			}
		}

		CV_NEXT_SEQ_ELEM( elem_size, reader );
	}
	printf("%d triangles...", trivec.size());
	cvSaveImage("triangles.jpg", image1);
	
	cvSet( image1, cvScalarAll(255) );
	IplImage * clean_nonthresh = cvLoadImage( "conhull-clean.jpg", CV_LOAD_IMAGE_COLOR );

	// for each triangle
	for(unsigned int i = 0; i < trivec.size(); i++) {
		Triangle curtri = trivec[i];
		CvMat * curpoints = cvCreateMat( 1, 3, CV_32SC2 );
		Triangle target;
		std::map<CvPoint,CvPoint>::iterator piter[3];
		
		printf("Triangle %d / %d\n",i,trivec.size());
		int is_corner = 0;
		for(int j = 0; j < 3; j++) {
			/*
			curpoints->data.i[2*j+0] = curtri.points[j].x;
			curpoints->data.i[2*j+1] = curtri.points[j].y;
			*/
			CV_MAT_ELEM( *curpoints, CvPoint, 0, j ) = curtri.points[j];
			printf("%d,%d\n",curtri.points[j].x,curtri.points[j].y);
	
			if((curtri.points[j] == cvPoint(0,0)) ||  (curtri.points[j] == cvPoint(0,image1->height - 1)) ||(curtri.points[j] == cvPoint(image1->width - 1,0)) ||(curtri.points[j] == cvPoint(image1->width - 1,image1->height - 1))) {
				is_corner++;
			}
			

			for(unsigned int k = 0; k < point_lookup.size(); k++) {
				std::pair<CvPoint,CvPoint> thispair = point_lookup[k];
				if(thispair.first == curtri.points[j]) {
					target.points[j] = thispair.second;
					break;
				}
			}

			/*
			piter[j] = point_lookup_map.find(curtri.points[j]);
			if(piter[j] != point_lookup_map.end() ) {
				target.points[j] = piter[j]->second;
			}
			*/
		}
			
		// if((piter[0] != point_lookup_map.end()) && (piter[1] != point_lookup_map.end()) && (piter[2] != point_lookup_map.end())) {
		if(is_corner < 3) {
			CvMat * newcorners = cvCreateMat( 3, 3, CV_32FC1 );
			newcorners->data.fl[3*0+0] = target.points[0].x;
			newcorners->data.fl[3*0+1] = target.points[1].x;
			newcorners->data.fl[3*0+2] = target.points[2].x;
			newcorners->data.fl[3*1+0] = target.points[0].y;
			newcorners->data.fl[3*1+1] = target.points[1].y;
			newcorners->data.fl[3*1+2] = target.points[2].y;
			newcorners->data.fl[3*2+0] = 1;
			newcorners->data.fl[3*2+1] = 1;
			newcorners->data.fl[3*2+2] = 1;

			CvContour hdr;
			CvSeqBlock blk;
			CvRect trianglebound = cvBoundingRect( cvPointSeqFromMat(CV_SEQ_KIND_CURVE+CV_SEQ_FLAG_CLOSED, curpoints, &hdr, &blk), 1 );
			printf("Bounding box: %d,%d,%d,%d\n",trianglebound.x,trianglebound.y,trianglebound.width,trianglebound.height);
			for(int y = trianglebound.y; (y < (trianglebound.y + trianglebound.height)) && ( y < image1->height); y++) {
				for(int x = trianglebound.x; (x < (trianglebound.x + trianglebound.width)) && (x < image1->width); x++) {
					// check to see if we're inside this triangle
					/*
					CvPoint v0 = cvPoint( curtri.points[2].x - curtri.points[0].x, curtri.points[2].y - curtri.points[0].y );
					CvPoint v1 = cvPoint( curtri.points[1].x - curtri.points[0].x, curtri.points[1].y - curtri.points[0].y );
					CvPoint v2 = cvPoint( x - curtri.points[0].x, y - curtri.points[0].y );
					
					int dot00 = v0.x * v0.x + v0.y * v0. y;
					int dot01 = v0.x * v1.x + v0.y * v1. y;
					int dot02 = v0.x * v2.x + v0.y * v2. y;
					int dot11 = v1.x * v1.x + v1.y * v1. y;
					int dot12 = v1.x * v2.x + v1.y * v2. y;

					double invDenom = 1.0 / (double)(dot00 * dot11 - dot01 * dot01);
					double u = (double)(dot11 * dot02 - dot01 * dot12) * invDenom;
					double v = (double)(dot00 * dot12 - dot01 * dot02) * invDenom;
					*/

					CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
					CvMat * result = cvCreateMat(3, 1, CV_32FC1);
					curp->data.fl[0] = x;
					curp->data.fl[1] = y;
					curp->data.fl[2] = 1;
					cvMatMul( baryinvvec[i], curp, result );
					// double u = result->data.fl[0]/result->data.fl[2];
					// double v = result->data.fl[1]/result->data.fl[2];

					/*
					if((i == 3019) && (y == 1329) && (x > 2505) && (x < 2584)) {
						printf("Range %d: %f, %f, %f\t%f, %f, %f\n",x,result->data.fl[0],result->data.fl[1],result->data.fl[2],
								sourcepoint->data.fl[0],sourcepoint->data.fl[1],sourcepoint->data.fl[2]);
					}
					*/

					if( (result->data.fl[0] > MIN_VAL) && (result->data.fl[1] > MIN_VAL) && (result->data.fl[2] > MIN_VAL) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					// if((u > 0) || (v > 0) /*&& ((u +v) < 1)*/ )
						// printf("Barycentric: %f %f %f\n", result->data.fl[0], result->data.fl[1], result->data.fl[2]);
						// this point is inside this triangle
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//	trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);
						
						CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
						cvMatMul( newcorners, result, sourcepoint );	
					
						double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
						double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
						if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
							// printf("%d,%d %d,%d\n",x,y,(int)sourcex,(int)sourcey);
							cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
						}
	
						
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

						cvReleaseMat( &sourcepoint );
					}
					cvReleaseMat( &result );
					cvReleaseMat( &curp );
				}
			}
			
			for(int k = 0; k < verts; k++) {
				double x = clean_texture->data.fl[2*k+0];
				double y = clean_texture->data.fl[2*k+1];
				
				// check to see if we're inside this triangle
				CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
				CvMat * result = cvCreateMat(3, 1, CV_32FC1);
				curp->data.fl[0] = x;
				curp->data.fl[1] = y;
				curp->data.fl[2] = 1;
				cvMatMul( baryinvvec[i], curp, result );
			
				if( (result->data.fl[0] > MIN_VAL) && (result->data.fl[1] > MIN_VAL) && (result->data.fl[2] > MIN_VAL) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					
					CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
					cvMatMul( newcorners, result, sourcepoint );	
				
					double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
					double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
					if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
						clean_texture->data.fl[2*k+0] = sourcex;
						clean_texture->data.fl[2*k+1] = sourcey;
						// cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
					}

					cvReleaseMat( &sourcepoint );
				}
				cvReleaseMat( &result );
				cvReleaseMat( &curp );
			}
			cvReleaseMat( &newcorners );
		}
		cvReleaseMat( &curpoints );
	}

	cvReleaseImage( &clean_nonthresh );

	printf("done.\n");

	cvSaveImage("fullwarp.jpg", image1);

	printf("Drawing subdivisions on warped image...");
	draw_subdiv( image1, delaunay, NULL, NULL, 0, NULL );
	// draw_subdiv( image1, delaunay, delaunay_points, source_points, count, status );
	printf("done.\n");
	
	cvSaveImage("edgeswarp.jpg", image1);

	cvReleaseImage(&image2);

	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);
	// cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 3 );

	// cvCalcSubdivVoronoi2D( delaunay );
	printf("Drawing subdivisions on unwarped image...");
	// draw_subdiv( image2, delaunay, delaunay_points, dest_points, count, status );
	// draw_subdiv( image2, delaunay, NULL, NULL, 0, NULL );
	printf("done.\n");

	cvSaveImage("edges.jpg",image2);

	cvReleaseImage(&image1);
	cvFree(&source_points);
	cvFree(&dest_points);
	cvFree(&status);
	cvReleaseMemStorage(&storage);
	cvFree(&delaunay_points);

	cvReleaseImage(&image2);

	return 0;
}
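The inside-triangle test used twice above (multiply the inverted corner matrix by [x, y, 1] and require every barycentric coordinate to be positive, with the three summing to roughly 1) can be isolated as a small helper. A sketch of that idea only; the name and the eps parameter (standing in for MIN_VAL) are illustrative, not part of the original example:

// bary_inv is the inverse of [[x0 x1 x2],[y0 y1 y2],[1 1 1]] for one triangle,
// exactly as stored in baryinvvec above.
static bool point_in_triangle( const CvMat* bary_inv, double x, double y, double eps )
{
	double p[3] = { x, y, 1.0 };
	double b[3] = { 0.0, 0.0, 0.0 };

	// b = bary_inv * p gives the barycentric coordinates of (x, y)
	for( int r = 0; r < 3; r++ )
		for( int c = 0; c < 3; c++ )
			b[r] += cvmGet( bary_inv, r, c ) * p[c];

	return (b[0] > eps) && (b[1] > eps) && (b[2] > eps) &&
	       (fabs( 1.0 - (b[0] + b[1] + b[2]) ) <= 0.01);
}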
Example #28
0
bool ColorImageSegmentByKMeans2( const IplImage* img, IplImage* pResult, int nClusters, int sortFlag )
{
        assert( img != NULL && pResult != NULL );
        assert( img->nChannels == 3 && pResult->nChannels == 1 );

        int i, j;

        // create the sample matrix; CV_32FC3 means three 32-bit float channels (color image)
        CvMat* samples = cvCreateMat( (img->width) * (img->height), 1, CV_32FC3 );

        // create the cluster label matrix; CV_32SC1 means one 32-bit integer channel
        CvMat* clusters = cvCreateMat( (img->width) * (img->height), 1, CV_32SC1 );

        int k = 0;
        for( i = 0; i < img->width; i++ )
        {
               for( j = 0; j < img->height; j++ )
               {
                      CvScalar s;

                      // read the three channel values (RGB) of the pixel
                      s.val[0] = (float)cvGet2D( img, j, i ).val[0];
                      s.val[1] = (float)cvGet2D( img, j, i ).val[1];
                      s.val[2] = (float)cvGet2D( img, j, i ).val[2];

                      cvSet2D( samples, k++, 0, s );   // append the channel values to the sample matrix
               }
        }

        // run the clustering: at most 50 iterations, termination epsilon 1.0
        cvKMeans2( samples, nClusters, clusters, cvTermCriteria( CV_TERMCRIT_ITER, 50, 1.0 ) );

        k = 0;
        int val = 0;
        float step = 255 / ((float)nClusters - 1);   // cast so the division happens in floating point

        for( i = 0; i < img->width; i++ )
        {
               for( j = 0; j < img->height; j++ )
               {
                      val = (int)clusters->data.i[k++];

                      CvScalar s;
                      s.val[0] = 255 - val * step;   // map each cluster to a distinct gray level
                      cvSet2D( pResult, j, i, s );   // assign the value to each pixel
               }
        }

        cvReleaseMat( &samples );
        cvReleaseMat( &clusters );

        return true;
}
void draw_lateral_offset(ProbabilisticMapParams map_params, IplImage *dst_image)
{
  int x_offset = (int)ceil(2.0 / map_params.grid_res);
  int x_limit = (int)ceil(48.0 / map_params.grid_res);
  int rect_size_x = (int)ceil(3.0 / map_params.grid_res);
  int rect_size_y = (int)ceil(1.0 / map_params.grid_res);

  int n_size_x = (int)(MIN(dst_image->width, x_limit + x_offset) - x_offset) / rect_size_x;
  int n_size_y = (int)dst_image->height / rect_size_y;

  static double **values;
  if (values == NULL)
  {
    values = (double**)malloc(n_size_y * sizeof(double*));
    for (int i = 0; i < n_size_y; i++)
      values[i] = (double*)malloc(n_size_x * sizeof(double));
  }

  for (int j = 0; j < n_size_y; j++)
  {
    //cvLine(dst_image, cvPoint(0, j * rect_size_y), cvPoint(stereo_width - 1, j * rect_size_y), CV_RGB(0,0,255));
    for (int i = 0; i < n_size_x; i++)
    {
      CvRect rect = cvRect(x_offset + i * rect_size_x, j * rect_size_y, rect_size_x, rect_size_y);

      //cvLine(dst_image, cvPoint(x_offset + i * rect_size_x, 0), cvPoint(x_offset + i * rect_size_x, stereo_height - 1), CV_RGB(0,0,255));

      unsigned long sum = 0;
      for (int y = rect.y; y < rect.y + rect.height; y++)
      {
        for (int x = rect.x; x < rect.x + rect.width; x++)
        {
          sum += (int)cvGet2D(dst_image, y, x).val[0];
        }
      }

      values[j][i] = (double) sum;
      //printf("%f\t", values[j][i]);
    }

    //printf("\n");
  }

  for (int j = 0; j < n_size_x; j++)
  {
    double sum = 0.0;
    int n = 0;
    for (int i = 0; i < n_size_y; i++)
    {
      if (values[i][j] > 0.0)
      {
        sum += values[i][j];
        n++;
      }
    }

    if (n == 0)   // nothing accumulated in this column; skip to avoid dividing by zero
      continue;

    double mean = (double)sum / n;

    double acum = 0.0;
    for (int i = 0; i < n_size_y; i++)
    {
      if (values[i][j] > 0.0)
      {
        acum = acum + (values[i][j] - mean) * (values[i][j] - mean);
      }
    }

    double variance = (double)acum / n;
    double std_dev = sqrt(variance);

    double alpha = 0.8;
    int start_index = INT_MIN;
    int n_iters = 0, start_cand = -1;
    int n_end_y = 24;
    int n_max_iters = 12;

    while (start_index < 0 || start_index > n_end_y)
    {
      alpha += 0.1;
      double threshold = mean - alpha * std_dev;

      for (int i = n_end_y; i >= 0; i--)
      {
        if (values[i][j] >= threshold)
        {
          if (start_index < i && n_iters == 0)
            start_cand = i;

          if (n_iters >= 3)
          {
            start_index = start_cand;
            break;
          }

          n_iters++;
        }
        else
        {
          n_iters = 0;
          start_index = INT_MIN;
        }
      }

      n_max_iters--;
      if (n_max_iters <= 0)
        break;
    }

    if (start_index >= 0 && start_index < n_end_y)
    {
      CvRect rect_ri = cvRect(x_offset + j * rect_size_x, start_index * rect_size_y, rect_size_x, rect_size_y);
      cvRectangle(dst_image, cvPoint(rect_ri.x, rect_ri.y), cvPoint(rect_ri.x + rect_ri.width, rect_ri.y - rect_ri.height), CV_RGB(0,255,0), 1, 8, 0);

      CvRect rect_ro = cvRect(x_offset + j * rect_size_x, (start_index + 1) * rect_size_y, rect_size_x, rect_size_y);
      cvRectangle(dst_image, cvPoint(rect_ro.x, rect_ro.y), cvPoint(rect_ro.x + rect_ro.width, rect_ro.y - rect_ro.height), CV_RGB(255,0,0), 1, 8, 0);
    }
  }
}
Example #30
0
float blurriness( IplImage* image)
{

	IplImage* clm_dst = cvCreateImage(cvSize(image->width,image->height),image->depth,image->nChannels);
	IplImage* row_dst = cvCreateImage(cvSize(image->width,image->height),image->depth,image->nChannels);
	IplImage* act_var_v = cvCreateImage(cvSize(image->width,image->height),image->depth,image->nChannels);
	IplImage* act_var_h = cvCreateImage(cvSize(image->width,image->height),image->depth,image->nChannels);
	IplImage* blur_var_v = cvCreateImage(cvSize(image->width,image->height),image->depth,image->nChannels);
	IplImage* blur_var_h = cvCreateImage(cvSize(image->width,image->height),image->depth,image->nChannels);
	IplImage* var_v = cvCreateImage(cvSize(image->width,image->height),image->depth,image->nChannels);
	IplImage* var_h = cvCreateImage(cvSize(image->width,image->height),image->depth,image->nChannels);
	IplImage* diff_v = cvCreateImage(cvSize(image->width,image->height),image->depth,image->nChannels);
	IplImage* diff_h = cvCreateImage(cvSize(image->width,image->height),image->depth,image->nChannels);
	
	IplImage* temp = cvCreateImage( cvSize(image->width,image->height),image->depth,image->nChannels);
	cvCopy( image,temp);
#if PRINT_DEBUG
	if(glb_img==NULL) {
		cout<<"global image is Null\n";
	}
	cout<<"global image type:"<<glb_img->width<<","<<glb_img->height<<endl;
	cout<<"temp img type:"<<temp->width<<","<<temp->height<<endl;
#endif
//	clm_dst = cvCreateImage( cvSize(image->width,image->height),image->depth,image->nChannels);
//	row_dst = cvCreateImage( cvSize(image->width,image->height),image->depth,image->nChannels);
	
	cvSmooth( temp,row_dst,CV_BLUR,1,9);
	cvSmooth( temp,clm_dst,CV_BLUR,9,1);
#if SHOW_IMAGE
	cvShowImage("act image",image);
	cvWaitKey(0);
	cvShowImage("row dst",row_dst);
	cvWaitKey(0);
	cvShowImage("clm dst",clm_dst);
	cvWaitKey(0);
	cvCvtColor(row_dst,row_dst,CV_RGB2YCrCb);
	cvCvtColor(clm_dst,clm_dst,CV_RGB2YCrCb);
	cvShowImage("row dst",row_dst);
	cvWaitKey(0);
	cvShowImage("clm dst",clm_dst);
	cvWaitKey(0);
#endif

//	act_var_v = cvCreateImage(cvSize(image->width,image->height),IPL_DEPTH_32F,image->nChannels);
//	act_var_h = cvCreateImage(cvSize(image->width,image->height),IPL_DEPTH_32F,image->nChannels);
//	blur_var_v = cvCreateImage(cvSize(image->width,image->height),IPL_DEPTH_32F,image->nChannels);
//	blur_var_h = cvCreateImage(cvSize(image->width,image->height),IPL_DEPTH_32F,image->nChannels);
	
	int i,j;
	CvScalar scal;
	CvScalar scal1;
	CvScalar scal2;
    for( i=0;i<image->height;i++) {
         for( j=0;j<(image->width-1);j++) {
             scal1 = cvGet2D(image,i,j);
             scal2 = cvGet2D(image,i,j+1);
             scal.val[0] = fabs(scal1.val[0] - scal2.val[0]);   // fabs: the operands are doubles
             cvSet2D(act_var_h,i,j,scal);
//             cout<<scal.val[0]<<" ";
         }
		 scal.val[0] = 0;
		 cvSet2D(act_var_h,i,j,scal);
//         cout<<"\n";
     }
//	 cout<<"*********************************************\n";
//	 cout<<"2nd part"<<endl;
     for( i=0;i<image->width;i++) {
         for( j=0;j<(image->height-1);j++) {
             scal1 = cvGet2D( image,j,i);
             scal2 = cvGet2D( image,j+1,i);
             scal.val[0] = fabs(scal1.val[0] - scal2.val[0]);
             cvSet2D(act_var_v,j,i,scal);
//             cout<<scal.val[0]<<" ";
         }
		 scal.val[0] = 0;
		 cvSet2D(act_var_v,j,i,scal);
//         cout<<"\n";
     }

	for( i=0;i<image->height;i++) {
		for( j=0;j<(image->width-1);j++) {
			scal1 = cvGet2D(row_dst,i,j);
			scal2 = cvGet2D(row_dst,i,j+1);
			scal.val[0] = fabs(scal1.val[0] - scal2.val[0]);
			cvSet2D(blur_var_h,i,j,scal);
//			cout<<scal.val[0]<<" ";
		}
		scal.val[0] = 0;
		cvSet2D(blur_var_h,i,j,scal);
//		cout<<"\n";
	}
//	cout<<"*********************************************\n";
//	cout<<"2nd part"<<endl;
	for( i=0;i<image->width;i++) {
		for( j=0;j<(image->height-1);j++) {
			scal1 = cvGet2D( clm_dst,j,i);
			scal2 = cvGet2D( clm_dst,j+1,i);
			scal.val[0] = fabs(scal1.val[0] - scal2.val[0]);
			cvSet2D(blur_var_v,j,i,scal);
//			cout<<scal.val[0]<<" ";
		}
		scal.val[0] = 0;
		cvSet2D(blur_var_v,j,i,scal);

//		cout<<"\n";
	}
#if PRINT_DEBUG
	cout<<"image:"<<image->width<<" "<<image->height<<endl;
	cout<<"image:"<<image->width<<" "<<image->height<<endl;
	cout<<"act_v:"<<act_var_v->width<<" "<<act_var_h->height<<endl;
	cout<<"act_h:"<<act_var_h->width<<" "<<act_var_h->height<<endl;
	cout<<"blur_v:"<<blur_var_v->width<<" "<<blur_var_h->height<<endl;
	cout<<"blur_h:"<<blur_var_h->width<<" "<<blur_var_h->height<<endl;
#endif

//	diff_v = cvCreateImage(cvSize(image->width,image->height),IPL_DEPTH_32F,image->nChannels);
//	diff_h = cvCreateImage( cvSize(image->width,image->height),IPL_DEPTH_32F,image->nChannels);
	for(i=0;i<image->height;i++) {
		for(j=0;j<image->width;j++) {
			scal1 = cvGet2D(act_var_v,i,j);
			scal2 = cvGet2D(blur_var_v,i,j);
			scal.val[0] = fabs(scal1.val[0] - scal2.val[0]);
//			cout<<scal.val[0]<<" ";
			cvSet2D( diff_v,i,j,scal);
		}
//		cout<<"("<<i<<","<<j<<")\n";
	}
//	cout<<"diff: ("<<diff_v->height<<","<<diff_v->width<<")\n";
	for( i=0;i<act_var_h->height;i++) {
	    for( j=0;j<act_var_h->width;j++) {
		    scal1 = cvGet2D(act_var_h,i,j);
    	    scal2 = cvGet2D(blur_var_h,i,j);
	        scal.val[0] = fabs(scal1.val[0] - scal2.val[0]);
//			cout<<scal.val[0]<<" ";
        	cvSet2D( diff_h,i,j,scal);
	  	}
//		cout<<endl;
	}   

	int max = 0;
//	var_v = cvCreateImage(cvSize(image->width,image->height),IPL_DEPTH_32F,image->nChannels);
//	var_h = cvCreateImage(cvSize(image->width,image->height),IPL_DEPTH_32F,image->nChannels);
	
#if PRINT_DEBUG
	for(i=0;i<image->height;i++) {
		for( j=0;j<image->width;j++) {	
			scal = cvGet2D(diff_v,i,j);
			if(max<scal.val[0]) {
				max = scal.val[0];
			}
		}
	}
	cout<<"max buff_v:"<<max<<endl;
	for(i=0;i<image->height;i++) {
		for(j=0;j<image->width;j++) {
			scal.val[0] = max;
			cvSet2D(var_v,i,j,scal);
		}
	}

	max = 0;
	for(i=0;i<image->height;i++) {
		for( j=0;j<image->width;j++) {
			scal = cvGet2D(diff_h,i,j);
			if(max<scal.val[0]) {
				max = scal.val[0];
			}
		}
	}
	cout<<"max buff_h:"<<max<<endl;
#endif
	for(i=0;i<image->height;i++) {
	    for(j=0;j<image->width;j++) {
	        scal.val[0] = max;
	        cvSet2D(var_h,i,j,scal);
	    }
	}
	int act_sum_v =0;
	int act_sum_h =0;
	int diff_sum_v =0;
	int diff_sum_h =0;
	for(i=0;i<image->height;i++) {
		for(j=0;j<image->width;j++) {
			scal = cvGet2D(act_var_v,i,j);
			act_sum_v += scal.val[0];
		}
	}
	for(i=0;i<image->height;i++) {
		for(j=0;j<image->width;j++) {
			scal = cvGet2D( act_var_h,i,j);
			act_sum_h += scal.val[0];
		}
	}
	for(i=0;i<image->height;i++) {
	    for(j=0;j<image->width;j++) {
	        scal = cvGet2D( diff_v,i,j);
	        diff_sum_v += scal.val[0];
    	}
	}
	for(i=0;i<image->height;i++) {
	    for(j=0;j<image->width;j++) {
	        scal = cvGet2D( diff_h,i,j);
	        diff_sum_h += scal.val[0];
	    }
	}
#if PRINT_DEBUG
	cout<<"actual image sum of vertical variation : "<<act_sum_v<<endl;
	cout<<"horizontal : "<<act_sum_h<<endl;
	cout<<"sum of defference of variation vertically:"<<diff_sum_v<<endl;
	cout<<"horizontally:"<<diff_sum_h<<endl;
#endif
	float blur_v=0 ;
	float blur_h=0;
	float blur =0;
	blur_v = (float)abs(act_sum_v - diff_sum_v)/act_sum_v;
	blur_h = (float)abs(act_sum_h - diff_sum_h)/act_sum_h;
#if PRINT_DEBUG
	cout<<"blur metrics: vertically:"<<blur_v<<endl;
	cout<<"              horizontally:"<<blur_h<<endl;
#endif
	if( blur_v>	blur_h) {
		blur = blur_v;
	}
	else
		blur = blur_h;
#if PRINT_DEBUG
	cout<<"blur:"<<blur<<endl;
#endif
	return(blur);
}
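For reference (not part of the original example): each per-pixel loop above sums absolute differences between neighbouring pixels, first for the input and then for its blurred copy, and the final metric compares those sums. The same sums can be computed with whole-array calls. A minimal sketch for the horizontal-difference term only, paired with the same 1x9 box blur used for row_dst above and assuming a single-channel 8-bit input:

float blurriness_h_sketch( IplImage* gray )
{
	int w = gray->width, h = gray->height;
	IplImage* blurred = cvCreateImage( cvSize(w, h), IPL_DEPTH_8U, 1 );
	IplImage* d_act   = cvCreateImage( cvSize(w - 1, h), IPL_DEPTH_8U, 1 );
	IplImage* d_blur  = cvCreateImage( cvSize(w - 1, h), IPL_DEPTH_8U, 1 );
	IplImage* d_diff  = cvCreateImage( cvSize(w - 1, h), IPL_DEPTH_8U, 1 );
	CvMat la, ra, lb, rb;

	cvSmooth( gray, blurred, CV_BLUR, 1, 9 );   // same 1x9 box kernel as row_dst above

	// |I(x+1,y) - I(x,y)| for the original and the blurred image,
	// using shifted sub-views instead of per-pixel cvGet2D calls
	cvAbsDiff( cvGetSubRect( gray,    &la, cvRect(0, 0, w - 1, h) ),
	           cvGetSubRect( gray,    &ra, cvRect(1, 0, w - 1, h) ), d_act );
	cvAbsDiff( cvGetSubRect( blurred, &lb, cvRect(0, 0, w - 1, h) ),
	           cvGetSubRect( blurred, &rb, cvRect(1, 0, w - 1, h) ), d_blur );

	// |variation of original - variation of blurred|, as in the loops above
	cvAbsDiff( d_act, d_blur, d_diff );

	double act_sum  = cvSum( d_act ).val[0];
	double diff_sum = cvSum( d_diff ).val[0];
	float  blur_h   = (act_sum > 0) ? (float)( fabs(act_sum - diff_sum) / act_sum ) : 0.f;

	cvReleaseImage( &blurred );
	cvReleaseImage( &d_act );
	cvReleaseImage( &d_blur );
	cvReleaseImage( &d_diff );
	return blur_h;
}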