Example no. 1
void cvSoftmaxDer(CvMat * X, CvMat * dE_dY, CvMat * dE_dY_afder) {
  CV_FUNCNAME("cvSoftmaxDer");
  __BEGIN__;
  const int nr = X->rows, nc = X->cols, dtype = CV_MAT_TYPE(X->type);
  CvMat * Y = cvCreateMat(nr, nc, dtype);
  CvMat * dE_dY_transpose = cvCreateMat(nr, nc, dtype);
  CvMat * sum = cvCreateMat(nr, 1, dtype);
  CvMat * sum_repeat = cvCreateMat(nr, nc, dtype);
  cvSoftmax(X, Y);
  if (dE_dY->rows==nc && dE_dY->cols==nr){
    cvTranspose(dE_dY,dE_dY_transpose);
    cvMul(Y,dE_dY_transpose,dE_dY_afder);
  }else{
    cvMul(Y,dE_dY,dE_dY_afder);
  }
  cvReduce(dE_dY_afder,sum,-1,CV_REDUCE_SUM);
  cvRepeat(sum,sum_repeat);
  cvMul(Y,sum_repeat,sum_repeat);
  cvSub(dE_dY_afder,sum_repeat,dE_dY_afder);
  cvReleaseMat(&dE_dY_transpose);
  cvReleaseMat(&sum);
  cvReleaseMat(&sum_repeat);
  cvReleaseMat(&Y);
  __END__;
}
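For reference, the cvMul/cvReduce/cvSub sequence above is the standard softmax backward pass. Writing y = softmax(x) row-wise, the value left in dE_dY_afder is (a sketch of the math implied by the calls, not text from the original source)

\frac{\partial E}{\partial x_i} = y_i\left(\frac{\partial E}{\partial y_i} - \sum_j y_j\,\frac{\partial E}{\partial y_j}\right),

i.e. Y ⊙ dE/dY minus Y times the row-wise sum of Y ⊙ dE/dY, which is exactly what the cvReduce over columns followed by cvRepeat and cvSub computes.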
Example no. 2
/*************************************************************************
* @function:
*	calGradSim()
* @input:
*   const IplImage* image1           - input image 1
*   const IplImage* image2           - input image 2
* @return:
*   double g			             - gradient similarity
* @description:
*   Computes the gradient similarity between the two images
*************************************************************************/
double calGradSim(const IplImage* image1, const IplImage* image2)
{
	double c4 = 0;   
	double g = 0;

	IplImage* g1;
	IplImage* g2;
	IplImage* tmp;

	g1=gradientImage(image1);
	g2=gradientImage(image2);
	tmp = cvCloneImage(g1);

	cvMul(g1, g2, tmp);
	cvMul(g1, g1, g1);
	cvMul(g2, g2, g2);

	CvScalar s1 = cvSum(tmp);
	CvScalar s2 = cvSum(g1);
	CvScalar s3 = cvSum(g2);

	c4 = (0.03 * 255) * (0.03 * 255);
	g = (2 * s1.val[0] + c4) / (s2.val[0] +s3.val[0] + c4);

	cvReleaseImage(&g1);
	cvReleaseImage(&g2);
	cvReleaseImage(&tmp);

	return g;
}
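The three sums computed with cvMul/cvSum implement an SSIM-style gradient similarity score. With g1 and g2 the gradient-magnitude images returned by gradientImage (assumed single-channel here), the returned value is

g = \frac{2\sum_x g_1(x)\,g_2(x) + c_4}{\sum_x g_1(x)^2 + \sum_x g_2(x)^2 + c_4}, \qquad c_4 = (0.03\cdot 255)^2,

so g is at most 1, reached when the two gradient maps are identical, and decreases as they diverge.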
Example no. 3
CV_IMPL void cvCalS(const CvArr* srcarr,
                    CvArr* dstarr)
{
    CV_FUNCNAME("cvCalS");
    
    __BEGIN__;
    CvMat sstub, *src;
    CvMat dstub, *dst;
    CvMat* src_dx=0, *src_dy=0;
    CvSize size;
    int i, j;
    int iStep;
    float* fPtr;
    
    CV_CALL( src = cvGetMat(srcarr, &sstub ));
    CV_CALL( dst = cvGetMat(dstarr, &dstub ));
    
    if( CV_MAT_TYPE(src->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only 32-bit, 1-channel input images are supported" );
    
    if( CV_MAT_TYPE(dst->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only 32-bit, 1-channel input images are supported" );
    
    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input images must have the same size" );
    
    size = cvGetMatSize( src );
    
    src_dx  = cvCreateMat(size.height, size.width, CV_32FC1 );
    src_dy  = cvCreateMat(size.height, size.width, CV_32FC1 );
    cvSetZero(src_dx);
    cvSetZero(src_dy);
    
    iStep = dst->step / sizeof(fPtr[0]);
    fPtr = dst->data.fl;
    
    cvSobel(src, src_dx, 1, 0, 1);
    cvSobel(src, src_dy, 0, 1, 1);
    cvMul(src_dx, src_dx, src_dx, 0.25f*0.25f); //rescale gradient
    cvMul(src_dy, src_dy, src_dy, 0.25f*0.25f); //rescale gradient
    cvAdd(src_dx, src_dy, dst);
    
    for(j=0; j<size.height; j++){
        for (i=0; i<size.width; i++)
            fPtr[i+iStep*j] = sqrt(fPtr[i+iStep*j])+SMALLNUM;
    }
    cvReleaseMat(&src_dx);
    cvReleaseMat(&src_dy);
    
    __END__;
}
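In plain terms, cvCalS writes an (offset) gradient-magnitude map into dst: the aperture-1 Sobel responses are squared with the 0.25*0.25 rescaling noted in the comments, summed, and square-rooted, giving

\mathrm{dst}(x,y) = \sqrt{\left(\tfrac14\,S_x\right)^2 + \left(\tfrac14\,S_y\right)^2} + \mathrm{SMALLNUM},

where S_x, S_y are the Sobel derivatives of src and SMALLNUM (a small constant defined elsewhere in this codebase) keeps the result strictly positive, e.g. so it can safely appear in a later division.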
Example no. 4
CV_IMPL void cvCurvature(const CvArr* srcarr_x, 
                         const CvArr* srcarr_y,
                         CvArr* dstarr)
{
    CV_FUNCNAME("cvCurvature");
    
    __BEGIN__;
    
    CvMat sstub_x, sstub_y, *src_x, *src_y;
    CvMat dstub, *dst;
    CvSize size;
    CvMat *Nxx=0, *Nyy=0, *ones=0;
    
    CV_CALL( src_x = cvGetMat(srcarr_x, &sstub_x ));
    CV_CALL( src_y = cvGetMat(srcarr_y, &sstub_y ));
    CV_CALL( dst = cvGetMat(dstarr, &dstub ));
    
    if( CV_MAT_TYPE(src_x->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only 32-bit, 1-channel input images are supported" );
    
    if( CV_MAT_TYPE(src_y->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only 32-bit, 1-channel input images are supported" );
    
    if( CV_MAT_TYPE(dst->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only 32-bit, 1-channel input images are supported" );
    
    if( !CV_ARE_SIZES_EQ( src_x, src_y ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input images must have the same size" );
    
    size = cvGetMatSize( src_x );
    Nxx = cvCreateMat(size.height, size.width, CV_32FC1 );
    Nyy = cvCreateMat(size.height, size.width, CV_32FC1 );
    ones= cvCreateMat(size.height, size.width, CV_32FC1 );
    cvSetZero(Nxx);
    cvSetZero(Nyy);
    cvSet(ones, cvScalar(1.0f));
    
    cvSobel(src_x, Nxx, 1, 0, 1);
    cvSobel(src_y, Nyy, 0, 1, 1);
    cvMul(Nxx, ones, Nxx, 0.25f);
    cvMul(Nyy, ones, Nyy, 0.25f);
    cvAdd(Nxx, Nyy, dst);
    cvReleaseMat(&Nxx);
    cvReleaseMat(&Nyy);
    cvReleaseMat(&ones);
    
    __END__;
    
}
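A hedged reading of cvCurvature: assuming srcarr_x and srcarr_y hold the components of the normalized gradient N = ∇u/|∇u| (the usual pairing with a cvCalS-style magnitude as in Example no. 3), the Sobel-x of N_x plus the Sobel-y of N_y, rescaled by the 0.25 factor applied through the all-ones multiply, approximates the mean-curvature term of level-set methods:

\kappa \approx \tfrac14\left(\frac{\partial N_x}{\partial x} + \frac{\partial N_y}{\partial y}\right) = \tfrac14\,\operatorname{div}\!\left(\frac{\nabla u}{|\nabla u|}\right).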
Example no. 5
/*IplImage* HomographyCalculationThread::rectifyImage(IplImage* inImage, IplImage* outImage,double oldFirstApexX,double oldFirstApexY,double width,double height) 
{
	double x1 = 0;
	double x2 = 0;
	double x3 = 0;
	double y_position = 0;
	double x_position = 0;
	uchar* data = (uchar *)inImage->imageData;
	uchar* dataOut = (uchar *)outImage->imageData;
	for(int row=0;row<height;row++) {

		for(int col=0;col<width;col++) {

			x1 = cvmGet(mHMatrix,0,0) * col + cvmGet(mHMatrix,0,1) * row + cvmGet(mHMatrix,0,2);
			x2 = cvmGet(mHMatrix,1,0) * col + cvmGet(mHMatrix,1,1) * row + cvmGet(mHMatrix,1,2);
			x3 = cvmGet(mHMatrix,2,0) * col + cvmGet(mHMatrix,2,1) * row + 1;
			y_position = x2/x3 + oldFirstApexY;

			if(inImage->height < y_position) {
				y_position = (inImage->height-1);
			}

			x_position = x1/x3 + oldFirstApexX;
			if(inImage->width < x_position) {
				x_position = (inImage->width-1);
			}

			int temp_y = (int)y_position;
			int temp_x = (int)x_position;

			if(dataOut!=NULL && data!=NULL) {
				dataOut[row*outImage->widthStep+col*outImage->nChannels] = data[temp_y*inImage->widthStep+temp_x*inImage->nChannels];
				dataOut[row*outImage->widthStep+col*outImage->nChannels+1] = data[temp_y*inImage->widthStep+temp_x*inImage->nChannels+1];
				dataOut[row*outImage->widthStep+col*outImage->nChannels+2] = data[temp_y*inImage->widthStep+temp_x*inImage->nChannels+2];
			}
		}
	}
	cvReleaseMat(&mHMatrix);
	return outImage;
}
*/
void HomographyCalculationThread::correctHomographicMatrix(IplImage* inImage,CvMat* invH,double lastPictureApexX,double lastPictureApexY,double width,double height)
{
	CvMat *hCoeff = cvCreateMat(3,3,CV_32FC1);
	CvMat* multipleMat = cvCreateMat(3,3,CV_32FC1);
	double old_height = inImage->height;
	double y_position = lastPictureApexY;
	double x_position = lastPictureApexX;
	for (int i=1;i<10;i++) {
		double x1 = cvmGet(invH,0,0) * (x_position) + cvmGet(invH,0,1) * (y_position) + cvmGet(invH,0,2);
		double x2 = cvmGet(invH,1,0) * (x_position) + cvmGet(invH,1,1) * (y_position) + cvmGet(invH,1,2);
		double x3 = cvmGet(invH,2,0) * (x_position) + cvmGet(invH,2,1) * (y_position) + 1;
		x1 = x1/x3;
		x2 = x2/x3;

		double H_coeff = ((x1/width)+(x2/height))/2 - 0.01;
		for(int coeffRow=0;coeffRow<3;coeffRow++) {
			for(int coeffCol=0;coeffCol<3;coeffCol++) {
				cvmSet(hCoeff,coeffRow,coeffCol,H_coeff);
				cvmSet(multipleMat,coeffRow,coeffCol,cvmGet(mHMatrix,coeffRow,coeffCol));
			}
		}
		cvMul(multipleMat,hCoeff,mHMatrix);
		cvInvert(mHMatrix,invH);
	}

	cvReleaseMat(&multipleMat);
	cvReleaseMat(&hCoeff);
}
Example no. 6
static void node_composit_exec_cvMult(void *data, bNode *node, bNodeStack **in, bNodeStack **out)
{
	CvArr* dst;
	CvArr* src1;
	CvArr* src2;
	
	CV_FUNCNAME( "cvMult" ); 
	if(out[0]->hasoutput==0) return;
	cvSetErrMode(1); // error mode 1 = CV_ErrModeParent: report the error but keep running
	__CV_BEGIN__;
	if((in[0]->data)&&(in[1]->data)){
			
		CV_CALL(src1 = in[0]->data);
		CV_CALL(src2 = in[1]->data);
		if(!BOCV_checkAreSameType(src1, src2))
			  CV_ERROR( CV_StsBadArg,"The source inputs are different" );

		CV_CALL(dst=BOCV_CreateArrFrom(src2));
		if(dst)		
		{
			CV_CALL(cvMul(src1, src2, dst, 1.0));
		 	CV_CALL(out[0]->data= dst);
		}
	}
	__CV_END__;
}
Example no. 7
//--------------------------------------------------------------------------------
void ofxCvImage::operator *= ( ofxCvImage& mom ) {
	if( !mom.bAllocated ){
		ofLog(OF_LOG_ERROR, "in *=, mom needs to be allocated");	
		return;	
	}
	if( !bAllocated ){
		ofLog(OF_LOG_NOTICE, "in *=, allocating to match dimensions");			
		allocate(mom.getWidth(), mom.getHeight());
	}

	if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth )
    {
        if( matchingROI(getROI(), mom.getROI()) ) {
            float scalef = 1.0f / 255.0f;
            cvMul( cvImage, mom.getCvImage(), cvImageTemp, scalef );
            swapTemp();
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in *=, ROI mismatch");
        }
	} else {
        ofLog(OF_LOG_ERROR, "in *=, images need to have matching type");
	}
}
Example no. 8
//--------------------------------------------------------------------------------
void ofxCvImage::operator *= ( ofxCvImage& mom ) {
	if( !mom.bAllocated ){
		ofLogError("ofxCvImage") << "operator*=: mom needs to be allocated";	
		return;	
	}
	if( !bAllocated ){
		ofLogNotice("ofxCvImage") << "operator*=: allocating to match dimensions: "
			<< mom.getWidth() << " " << mom.getHeight();
		allocate(mom.getWidth(), mom.getHeight());
	}

	if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth )
    {
        if( matchingROI(getROI(), mom.getROI()) ) {
            float scalef = 1.0f / 255.0f;
            cvMul( cvImage, mom.getCvImage(), cvImageTemp, scalef );
            swapTemp();
            flagImageChanged();
        } else {
            ofLogError("ofxCvImage") << "operator*=: region of interest mismatch";
        }
	} else {
        ofLogError("ofxCvImage") << "operator*=: images type mismatch";
	}
}
Example no. 9
//--------------------------------------------------------------------------------
void ofxCvFloatImage::operator *= ( ofxCvFloatImage& mom ) {
	if( mom.width == width && mom.height == height ) {
		cvMul( cvImage, mom.getCvImage(), cvImageTemp );
		swapTemp();
	} else {
        cout << "error in *=, images are different sizes" << endl;
	}
}
Example no. 10
//--------------------------------------------------------------------------------
void CPUImageFilter::amplify ( CPUImageFilter& mom, float level ) {

	float scalef = level / 128.0f;

	cvMul( mom.getCvImage(), mom.getCvImage(), cvImageTemp, scalef );
	swapTemp();
	flagImageChanged();
}
Example no. 11
//--------------------------------------------------------------------------------
void ofxCvColorImage::operator *= ( ofxCvColorImage& mom ) {
    float scalef = 1.0f / 255.0f;
	if( mom.width == width && mom.height == height ) {
		cvMul( cvImage, mom.getCvImage(), cvImageTemp, scalef );
		swapTemp();
	} else {
        cout << "error in *=, images are different sizes" << endl;
	}
}
Example no. 12
//--------------------------------------------------------------------------------
void setfilter::amplify ( setfilter& mom, float level ) {

	//-- amplify weak areas --//
	float scalef = level / 104.0; //128.0f

	cvMul( mom.getCvImage(), mom.getCvImage(), cvImageTemp, scalef );
	swapTemp();
	flagImageChanged();
}
Example no. 13
void HarrisBuffer::HarrisFunction(double k, IplImage* dst)
{
  // Harris function in 3D
  // original space-time Harris
  /* detC = cxx.*cyy.*ctt          (xx yy tt)
          + 2*cxy.*cyt.*cxt        (2 * xy yt xt)
          - cxx.*cyt.*cyt          (xx yt^2)
          - cxy.*cxy.*ctt          (tt xy^2)
          - cxt.*cyy.*cxt;         (yy xt^2)
  */
  cvMul(cxx, cyy, tmp1);
  cvMul(ctt, tmp1, tmp1);

  cvMul(cxy, cxt, tmp2);
  cvMul(cyt, tmp2, tmp2,2);

  cvAdd(tmp1,tmp2,tmp1);

  cvMul(cyt,cyt,tmp2);
  cvMul(cxx,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  cvMul(cxy,cxy,tmp2);
  cvMul(ctt,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  cvMul(cxt,cxt,tmp2);
  cvMul(cyy,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  //trace3C=(cxx+cyy+ctt).^3;
  cvAdd(cxx,cyy,tmp2);
  cvAdd(ctt,tmp2,tmp2);
  cvPow(tmp2,tmp2,3);

  //H=detC-stharrisbuffer.kparam*trace3C;
  cvScale(tmp2,tmp2,k,0);
  cvSub(tmp1,tmp2,dst);
}
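The buffers cxx, cxy, cxt, cyy, cyt, ctt are the six distinct entries of the 3x3 spatio-temporal second-moment matrix C, and the code evaluates, per pixel,

H = \det C - k\,(\operatorname{trace} C)^3,\qquad \det C = c_{xx}c_{yy}c_{tt} + 2\,c_{xy}c_{yt}c_{xt} - c_{xx}c_{yt}^2 - c_{tt}c_{xy}^2 - c_{yy}c_{xt}^2,

the space-time analogue of the 2D Harris measure (trace cubed rather than squared because C is 3x3).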
Example no. 14
static void icvH11Ops( CvMat* X, CvMat* Y, void* userdata )
{
	CvH11OpsData* h11 = (CvH11OpsData*)userdata;
	h11->AOps( X, h11->AR, h11->userdata );
	h11->AtOps( h11->AR, h11->AtR, h11->userdata );
	double rc = h11->fe_inv_2 * cvDotProduct( h11->atr, X );
	cvAddWeighted( h11->AtR, -h11->fe_inv, h11->atr, rc, 0, h11->AtR );
	cvMul( h11->sigx, X, h11->tX );
	cvAdd( h11->tX, h11->AtR, Y );
}
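Unrolled, icvH11Ops applies a matrix-free operator of the form

H_{11}X = \mathrm{sigx}\odot X \;-\; \tfrac{1}{fe}\,A^{\top}A X \;+\; \tfrac{1}{fe^2}\,(\mathrm{atr}^{\top}X)\,\mathrm{atr},

where AOps/AtOps are the user-supplied A and A^T callbacks; this is the shape of the reduced Newton-system operator found in log-barrier l1 solvers, presumably consumed here by a conjugate-gradient routine.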
Example no. 15
int main(){
    
    //initialize
    IplImage* src_image = 0;
    IplImage* mul_image = 0;
    IplImage* div_image = 0;
    IplImage* tmp_image = 0;
    
    //load image
    src_image = cvLoadImage("/Users/ihong-gyu/MyProject/OpenCVTest/Lena.jpeg",0);
    
    //create a window
    cvNamedWindow("Original Image", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Multiply Image", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Divide Image", CV_WINDOW_AUTOSIZE);
    
    //make images
    tmp_image = cvCreateImage(cvGetSize(src_image),IPL_DEPTH_8U, 1);
    mul_image = cvCreateImage(cvGetSize(src_image),IPL_DEPTH_8U, 1);
    div_image = cvCreateImage(cvGetSize(src_image),IPL_DEPTH_8U, 1);
    
    //multiply&divide
    cvSet(tmp_image,cvScalarAll(1),NULL);
    cvMul(src_image,tmp_image,mul_image,1.5);
    cvMul(src_image,tmp_image,div_image,1.0/2.0);
    
    
    //show the image
    cvShowImage("Original Image", src_image);
    cvShowImage("Multiply Image", mul_image);
    cvShowImage("Divide Image", div_image);
    
    
    //wait for a key
    cvWaitKey(0);
    
    //release the image
    cvReleaseImage(&src_image);
    cvReleaseImage(&mul_image);
    cvReleaseImage(&div_image);
    cvReleaseImage(&tmp_image);
    
    return 0;
}
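A note on the trick above: cvMul computes dst(I) = scale * src1(I) * src2(I), saturating for 8-bit images, so multiplying by an all-ones image is simply a per-pixel brightness scaling. A minimal equivalent sketch without the temporary image (same src_image, same destinations) would be:

    cvConvertScale(src_image, mul_image, 1.5, 0);        // dst = 1.5 * src, saturated to [0,255]
    cvConvertScale(src_image, div_image, 1.0 / 2.0, 0);  // dst = 0.5 * src

The tmp_image route is still a reasonable way to demonstrate the scale argument of cvMul itself.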
Example no. 16
IplImage* multiplier(IplImage *image1, IplImage *image2){
	
	IplImage *res = cvCreateImage(cvGetSize(image1),image1->depth,image1->nChannels);
	
	IplImage *imgCanal1 = cvCreateImage(cvGetSize(image1),image1->depth,1);
	IplImage *imgCanal2 = cvCreateImage(cvGetSize(image1),image1->depth,1);
	IplImage *imgCanal3 = cvCreateImage(cvGetSize(image1),image1->depth,1);
	
	cvSplit(image1,imgCanal1,imgCanal2,imgCanal3,NULL);
	
	cvMul(imgCanal1, image2, imgCanal1, 1);
	cvMul(imgCanal2, image2, imgCanal2, 1);
	cvMul(imgCanal3, image2, imgCanal3, 1);
	
	cvMerge(imgCanal1,imgCanal2,imgCanal3,NULL,res);
	
	cvReleaseImage(&imgCanal1);
	cvReleaseImage(&imgCanal2);
	cvReleaseImage(&imgCanal3);
	
	return res;
	
}
Example no. 17
CvMat* HomographyCalculationThread::calculateHomographicMatrix(double newPictureApexX[],double newPictureApexY[],double pictureApexXPosition[],double pictureApexYPosition[]) 
{
	CvMat* mmat = cvCreateMat(3,3,CV_32FC1);
	CvMat* a = cvCreateMat(POINTS*2,9,CV_32FC1);
	for(int count=1;count<POINTS+1;count++) {
		cvmSet(a,2*count-2,0,newPictureApexX[count-1]);
		cvmSet(a,2*count-2,1,newPictureApexY[count-1]);
		cvmSet(a,2*count-2,2,1);
		cvmSet(a,2*count-2,3,0);
		cvmSet(a,2*count-2,4,0);
		cvmSet(a,2*count-2,5,0);
		cvmSet(a,2*count-2,6,(-newPictureApexX[count-1]*pictureApexXPosition[count-1]));
		cvmSet(a,2*count-2,7,(-pictureApexXPosition[count-1]*newPictureApexY[count-1]));
		cvmSet(a,2*count-2,8,-pictureApexXPosition[count-1]);
		cvmSet(a,2*count-1,0,0);
		cvmSet(a,2*count-1,1,0);
		cvmSet(a,2*count-1,2,0);
		cvmSet(a,2*count-1,3,newPictureApexX[count-1]);
		cvmSet(a,2*count-1,4,newPictureApexY[count-1]);
		cvmSet(a,2*count-1,5,1);
		cvmSet(a,2*count-1,6,(-newPictureApexX[count-1]*pictureApexYPosition[count-1]));
		cvmSet(a,2*count-1,7,(-pictureApexYPosition[count-1]*newPictureApexY[count-1]));
		cvmSet(a,2*count-1,8,-pictureApexYPosition[count-1]);
	}
	CvMat* U  = cvCreateMat(8,8,CV_32FC1);
	CvMat* D  = cvCreateMat(8,9,CV_32FC1);
	CvMat* V  = cvCreateMat(9,9,CV_32FC1);
	CvMat* V22 = cvCreateMat(3,3,CV_32FC1);
	mHMatrix = cvCreateMat(3,3,CV_32FC1);
	CvMat* invH = cvCreateMat(3,3,CV_32FC1);
	cvSVD(a, D, U, V, CV_SVD_U_T|CV_SVD_V_T);

	for(int a=0;a<3;a++) {
		for(int b=0;b<3;b++) {
			cvmSet(mmat,a,b,cvmGet(V,8,a*3+b));
			cvmSet(V22,a,b,(1/cvmGet(V,8,4)));
		}
	}

	cvMul(mmat,V22,mHMatrix);
	cvInvert(mHMatrix,invH);
	cvReleaseMat(&U);
	cvReleaseMat(&D);
	cvReleaseMat(&V);
	cvReleaseMat(&V22);
	cvReleaseMat(&a);
	cvReleaseMat(&mmat);
	return invH;
}
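This is the standard DLT estimation of a homography: each of the POINTS correspondences contributes the familiar two rows to a, and the solution is

\hat h = \arg\min_{\|h\|=1}\|a\,h\|,

i.e. the right singular vector of a associated with the smallest singular value (row 8 of V, since CV_SVD_V_T returns V transposed). The element-wise multiply by V22, a constant matrix filled with 1/V(8,4), just rescales every entry of the homography by that factor before the inversion.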
Example no. 18
//--------------------------------------------------------------------------------
void ofxCvFloatImage::operator *= ( ofxCvImage& mom ) {
	if( mom.getCvImage()->nChannels == cvImage->nChannels && 
        mom.getCvImage()->depth == cvImage->depth )
    {
        if( matchingROI(getROI(), mom.getROI()) ) {
            cvMul( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in *=, ROI mismatch");
        }
	} else {
        ofLog(OF_LOG_ERROR, "in *=, images need to have matching type");
	}
}
Example no. 19
//--------------------------------------------------------------------------------
void ofxCvFloatImage::operator *= ( ofxCvImage& mom ) {
	if( mom.getCvImage()->nChannels == cvImage->nChannels && 
        mom.getCvImage()->depth == cvImage->depth )
    {
        if( pushSetBothToTheirIntersectionROI(*this,mom) ) {
            cvMul( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            popROI();       //restore previous ROI
            mom.popROI();   //restore previous ROI
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in *=, ROI mismatch");
        }
	} else {
        ofLog(OF_LOG_ERROR, "in *=, images need to have matching type");
	}
}
Example no. 20
double pkmGaussianMixtureModel::multinormalDistribution(const CvMat *pts, const CvMat *mean, const CvMat *covar)
{
	
	int dimensions = 2;
	//  add a tiny bit because of small samples
	CvMat *covarShifted = cvCreateMat(2, 2, CV_64FC1);
	cvAddS( covar, cvScalarAll(0.001), covarShifted);
	
	// calculate the determinant
	double det = cvDet(covarShifted);
	
	// invert covariance
	CvMat *covarInverted = cvCreateMat(2, 2, CV_64FC1);
	cvInvert(covarShifted, covarInverted);
	
	double ff = (1.0/(2.0*(double)PI))*(pow(det,-0.5));
	
	CvMat *centered = cvCreateMat(2, 1, CV_64FC1);
	cvSub(pts, mean, centered);
	
	CvMat *invxmean = cvCreateMat(2, 1, CV_64FC1);
	//cvGEMM(covarInverted, centered, 1., NULL, 1., invxmean);
	cvMatMul(covarInverted, centered, invxmean);
	
	cvMul(centered, invxmean, invxmean);
	CvScalar sum = cvSum(invxmean);
	/*
	 printf("covar: %f %f %f %f\n", cvmGet(covar, 0, 0), cvmGet(covar, 0, 1), cvmGet(covar, 1, 0), cvmGet(covar, 1, 1));
	 printf("covarShifted: %f %f %f %f\n", cvmGet(covarShifted, 0, 0), cvmGet(covarShifted, 0, 1), cvmGet(covarShifted, 1, 0), cvmGet(covarShifted, 1, 1));
	 printf("det: %f\n", det);
	 printf("covarInverted: %f %f %f %f\n", cvmGet(covarInverted, 0, 0), cvmGet(covarInverted, 0, 1), cvmGet(covarInverted, 1, 0), cvmGet(covarShifted, 1, 1));
	 printf("ff: %f\n", ff);
	 printf("pts: %f %f)\n", cvmGet(pts, 0, 0), cvmGet(pts, 1, 0));
	 printf("mean: %f %f)\n", cvmGet(mean, 0, 0), cvmGet(mean, 1, 0));
	 printf("centered: %f %f)\n", cvmGet(centered, 0, 0), cvmGet(centered, 1, 0));
	 printf("invxmean: %f %f)\n", cvmGet(invxmean, 0, 0), cvmGet(invxmean, 1, 0));
	 printf("scalar: %f %f %f %f\n", sum.val[0], sum.val[1], sum.val[2], sum.val[3]);
	 */
	cvReleaseMat(&covarShifted);
	cvReleaseMat(&covarInverted);
	cvReleaseMat(&centered);
	cvReleaseMat(&invxmean);
	
	return ff * exp(-0.5*sum.val[0]);
	
}
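The cvMatMul/cvMul/cvSum combination evaluates the quadratic form of the bivariate normal density, with the 0.001 added to covar acting as a crude regularizer for small-sample covariance estimates:

p(\mathbf{x}) = \frac{1}{2\pi\,|\Sigma|^{1/2}}\exp\!\left(-\tfrac12\,(\mathbf{x}-\boldsymbol{\mu})^{\top}\Sigma^{-1}(\mathbf{x}-\boldsymbol{\mu})\right);

the element-wise product of (pts - mean) with Σ^{-1}(pts - mean) followed by cvSum yields the scalar quadratic form without a second matrix multiplication.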
Example no. 21
//--------------------------------------------------------------------------------
void ofxCvFloatImage::operator *= ( ofxCvImage& mom ) {
	if( mom.getWidth() == 0 || mom.getHeight() == 0 ){
		ofLog(OF_LOG_ERROR, "in *=, mom width or height is 0");	
		return;	
	}
	if( !bAllocated ){
		ofLog(OF_LOG_ERROR, "in *=, image is not allocated");		
		return;	
	}
		
	if( mom.getCvImage()->nChannels == cvImage->nChannels && mom.getCvImage()->depth == cvImage->depth ){
        if( matchingROI(getROI(), mom.getROI()) ) {
            cvMul( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            flagImageChanged();
        } else {
            ofLog(OF_LOG_ERROR, "in *=, ROI mismatch");
        }
	} else {
        ofLog(OF_LOG_ERROR, "in *=, images need to have matching type");
	}
}
Example no. 22
int main(int argc, _TCHAR* argv[])
{
	cvNamedWindow( "Background Averaging", CV_WINDOW_AUTOSIZE );
	CvCapture* capture = cvCreateFileCapture( "tree.avi" );
	IplImage *frame, *mask1, *mask3;

	int frameCount = 0;
	while(1) {
		frameCount++;
		frame = cvQueryFrame( capture );
		if( !frame ) break;
		CvSize sz = cvGetSize( frame );
		mask1 = cvCreateImage( sz, IPL_DEPTH_8U, 1 );
		mask3 = cvCreateImage( sz, IPL_DEPTH_8U, 3 );
		if(frameCount == 1)
			AllocateImages( frame );
		

		if( frameCount < 30 ){
			accumulateBackground( frame );
		}else if( frameCount == 30 ){
			createModelsfromStats();
		}else{
			backgroundDiff( frame, mask1 );

			cvCvtColor(mask1,mask3,CV_GRAY2BGR);
			cvNorm( mask3, mask3, CV_C, 0);
			cvThreshold(mask3, mask3, 100, 1, CV_THRESH_BINARY);
			cvMul( frame, mask3, frame, 1.0 );
			cvShowImage( "Background Averaging", frame );
		}

		char c = cvWaitKey(33);
		if( c == 27 ) break;
	}
	cvReleaseCapture( &capture );
	cvDestroyWindow( "Background Averaging" );
	DeallocateImages();
}
Example no. 23
//------------------------------------------------------------------------------
// Color Similarity Matrix Calculation
//------------------------------------------------------------------------------
CvMat *colorsim(int nbins, double sigma) {

	CvMat *xc=cvCreateMat(1,nbins, CV_32FC1);
	CvMat *yr=cvCreateMat(nbins,1, CV_32FC1);

	CvMat *x=cvCreateMat(nbins,nbins, CV_32FC1);
	CvMat *y=cvCreateMat(nbins,nbins, CV_32FC1);
	CvMat *m=cvCreateMat(x->rows,x->rows, CV_32FC1);


	// Set x,y directions 
	for (int j=0;j<nbins;j++) {
		cvSetReal2D(xc,0,j,(j+1-0.5)/nbins);
		cvSetReal2D(yr,j,0,(j+1-0.5)/nbins);
	}

	// Set u,v, meshgrids
	for (int i=0;i<x->rows;i++) {
		cvRepeat(xc,x);
		cvRepeat(yr,y);
	}

	CvMat *sub = cvCreateMat(x->rows,y->cols,CV_32FC1);
	cvSub(x,y,sub);
	cvAbs(sub,sub);
	cvMul(sub,sub,sub);
	cvConvertScale(sub,sub,-1.0/(2*sigma*sigma));
	cvExp(sub,sub);
	cvSubRS(sub,cvScalar(1.0),m);

	cvReleaseMat(&xc);
	cvReleaseMat(&yr);
	cvReleaseMat(&x);
	cvReleaseMat(&y);
	cvReleaseMat(&sub);

	return m;
}
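The matrix returned by colorsim is a bin-to-bin dissimilarity kernel: with bin centres b_i = (i + 0.5)/nbins, each entry is

m(i,j) = 1 - \exp\!\left(-\frac{(b_i - b_j)^2}{2\sigma^2}\right),

so identical bins score 0 and the dissimilarity saturates towards 1 as the bins move apart relative to sigma.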
Example no. 24
void HarrisBuffer::DetectInterestPoints(int border)
{
	ipList.clear();
	Hbuffer.FindLocalMaxima(ipList,true);
	CvMat *reg=cvCreateMat( SizeNeighb, 1, CV_64F );
	
		
	//remove border
	if(border<2)
		border=2; // interest points on the boundary should be removed to keep the local 5x5x5 mask valid
				  // an alternative could be extending by 2 pixels in the space dimensions

	//select significant points which are not in the boundary
	for(int i=0;i<(int)ipList.size();i++)
		if(ipList[i].x>=border && (ipList[i].x<frame->width-border) &&
			ipList[i].y>=border && (ipList[i].y<frame->height-border) && ipList[i].val>SignificantPointThresh)
			ipList[i].reject=false;

	//computing JET features around an interest point by 5x5x5 local mask
	for(int i=0;i<(int)ipList.size();i++)
		if(!ipList[i].reject)
		{
			ipList[i].features=cvCreateMat( LengthFeatures, 1, CV_64F );
			convbuffer.GetLocalRegion(ipList[i].x,ipList[i].y ,ipList[i].t,5,5,5, reg);
			cvMatMul(&JetFilter,reg,ipList[i].features);
			cvMul(ipList[i].features,normvec,ipList[i].features);//normalization
		}
	cvReleaseMat(&reg);

	//check tstamp for any possible error

	//writing selected interest points to file
	for(int i=0;i<(int)ipList.size();i++)
		if(!ipList[i].reject)
			WriteFeatures(ipList[i]);
}		
Example no. 25
void HarrisBuffer::OpticalFlowFromSMM()
{
  // ref: Laptev et al. CVIU 2007, eq.(8)
  cvMul(cxx, cyy, tmp1);
  cvMul(cxy, cxy, tmp2);
  cvSub(tmp1,tmp2,tmp5);

  cvMul(cyy, cxt, tmp3);
  cvMul(cxy, cyt, tmp4);
  cvSub(tmp3,tmp4,tmp6);

  cvDiv(tmp6,tmp5,OFx);

  cvMul(cxx, cyt, tmp3);
  cvMul(cxy, cxt, tmp4);
  cvSub(tmp3,tmp4,tmp6);

  cvDiv(tmp6,tmp5,OFy);
}
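The two divisions are Cramer's rule for the 2x2 system built from the second-moment entries (the sign convention of cxt, cyt follows how they are accumulated elsewhere in HarrisBuffer):

\begin{pmatrix} c_{xx} & c_{xy}\\ c_{xy} & c_{yy}\end{pmatrix}\begin{pmatrix}\mathrm{OFx}\\ \mathrm{OFy}\end{pmatrix} = \begin{pmatrix} c_{xt}\\ c_{yt}\end{pmatrix} \;\Rightarrow\; \mathrm{OFx} = \frac{c_{yy}c_{xt}-c_{xy}c_{yt}}{c_{xx}c_{yy}-c_{xy}^2},\quad \mathrm{OFy} = \frac{c_{xx}c_{yt}-c_{xy}c_{xt}}{c_{xx}c_{yy}-c_{xy}^2},

i.e. a Lucas-Kanade style least-squares optical flow estimate, as in the Laptev et al. equation (8) cited in the comment.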
Example no. 26
int _harris(IplImage *src, float threshold, float *_cims) {
    IplImage *deriX = derivateX(src);
    IplImage *deriY = derivateY(src);
    IplImage *deriXY = cvCloneImage(deriX);
    //
    cvMul(deriX, deriY, deriXY);
    cvMul(deriX, deriX, deriX);
    cvMul(deriY, deriY, deriY);

    cvSmooth(deriX, deriX, CV_GAUSSIAN, 5);
    cvSmooth(deriY, deriY, CV_GAUSSIAN, 5);
    cvSmooth(deriXY, deriXY, CV_GAUSSIAN, 5);

    int w = src->width;
    int h = src->height;
    float *cims = _cims;
    float k = 0.06;
    float *vals = new float[w * h];
    memset(vals, 0, w * h * sizeof(float));

    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
            float Ix = pixval32f(deriX, x, y);
            float Iy = pixval32f(deriY, x, y);
            float Ixy = pixval32f(deriXY, x, y);

            float det = Ix * Iy - Ixy * Ixy;
            float tr = Ix + Iy;

            float cim = det - k * tr * tr;
//          if (cim > threshold) {
//              cims[y * w + x] = cim;
//          } else
//              cims[y * w + x] = 0.0;
            cims[y * w + x] = cim;
            vals[y * w + x] = cim;
        }
    }

    sort(vals, vals + w * h);
    int num =  w * h > 500 ? 500 : w * h * 3 / 4;
//  float thres = vals[w * h - 500];
    float thres = 5000;

    for (int y = filterSize; y < h - filterSize; y++) {
        for (int x = filterSize; x < w- filterSize; x++) {
//          if (cims[y * w + x] >= thres && is_extremun(cims, x, y, w, h, 10)) {
//          }
//          if (cims[y * w + x] < thres) {
//              cims[y * w + x] = 0;
//          }
        }
    }

    delete [] vals;
    cvReleaseImage(&deriX);
    cvReleaseImage(&deriY);
    cvReleaseImage(&deriXY);

    return 0;
}
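For context: after the three cvMul calls deriX, deriY and deriXY hold Ix^2, Iy^2 and Ix*Iy, and the Gaussian smoothing turns them into the entries of the local structure tensor M, so the per-pixel score in the loop is the Harris cornerness

R = \det M - k\,(\operatorname{trace} M)^2 = \langle I_x^2\rangle\langle I_y^2\rangle - \langle I_xI_y\rangle^2 - k\left(\langle I_x^2\rangle + \langle I_y^2\rangle\right)^2,\qquad k = 0.06

(the loop variables named Ix, Iy, Ixy are really the smoothed squared derivatives). The same measure is reused in the two functions that follow.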
Example no. 27
IplImage *harris_center(IplImage *img, float threshold) {
    IplImage *src = get_gray(img);
    IplImage *deriX = derivateX(src);
    IplImage *deriY = derivateY(src);
    IplImage *deriXY = cvCloneImage(deriX); // clone a derivative image so depth/channels match for cvMul

    cvMul(deriX, deriY, deriXY);
    cvMul(deriX, deriX, deriX);
    cvMul(deriY, deriY, deriY);

    cvSmooth(deriX, deriX, CV_GAUSSIAN, 5);
    cvSmooth(deriY, deriY, CV_GAUSSIAN, 5);
    cvSmooth(deriXY, deriXY, CV_GAUSSIAN, 5);

    int w = src->width;
    int h = src->height;

    int centerX = w / 2;
    int centerY = h / 2;
    int ctrSize = w / 3;

    float *cims = new float[(ctrSize + 1) * (ctrSize + 1)];
    float *vals = new float[(ctrSize + 1) * (ctrSize + 1)];
    memset(vals, 0, (ctrSize + 1) * (ctrSize + 1) * sizeof(float));

    int sx = centerX - ctrSize / 2;
    int sy = centerY - ctrSize / 2;
    float k = 0.06;
    for (int i = 0; i < ctrSize; i++) {
        for (int j = 0; j < ctrSize; j++) {
            int x = sx + j;
            int y = sy + i;

            float Ix = pixval32f(deriX, x, y);
            float Iy = pixval32f(deriY, x, y);
            float Ixy = pixval32f(deriXY, x, y);

            float det = Ix * Iy - Ixy * Ixy;
            float tr = Ix + Iy;
            float cim = det - k * tr * tr;

            cims[i * ctrSize + j] = cim;
            vals[i * ctrSize + j] = cim;
        }
    }

    sort(vals, vals + ctrSize * ctrSize);
    float thres = vals[ctrSize * ctrSize - 10];

    IplImage *printImg = cvCloneImage(img);
    for (int i = 0; i < ctrSize; i++) {
        for (int j = 0; j < ctrSize; j++) {
            if (cims[i * ctrSize + j] >= thres) {
                drawPoint(printImg, sx + j, sy + i);
            }
        }
    }

    show_image(printImg, "center");

    delete [] cims;
    delete [] vals;
    cvReleaseImage(&src);
    cvReleaseImage(&deriX);
    cvReleaseImage(&deriY);
    cvReleaseImage(&deriXY);

    return printImg;
}
Example no. 28
IplImage *harris(IplImage *img, float threshold, float ***ptsDes, int *npts, int *ndes, t_point **_pts) {
    IplImage *src = get_gray(img);
    IplImage *deriX = derivateX(src);
    IplImage *deriY = derivateY(src);
    IplImage *deriXY = cvCloneImage(deriX);
    //
    cvMul(deriX, deriY, deriXY);
    cvMul(deriX, deriX, deriX);
    cvMul(deriY, deriY, deriY);

//  cvNamedWindow("1", CV_WINDOW_AUTOSIZE);
//  cvShowImage("1", deriX);
//  cvWaitKey(0);

    cvSmooth(deriX, deriX, CV_GAUSSIAN, 5);
    cvSmooth(deriY, deriY, CV_GAUSSIAN, 5);
    cvSmooth(deriXY, deriXY, CV_GAUSSIAN, 5);

//  cvNamedWindow("1", CV_WINDOW_AUTOSIZE);
//  cvShowImage("1", deriX);
//  cvWaitKey(0);

    IplImage *printImg = cvCloneImage(img);

    int w = src->width;
    int h = src->height;
    float *cims = new float[w * h];
    float *vals = new float[w * h];
    float k = 0.06;
    memset(vals, 0, w * h * sizeof(float));
//  t_point *pts =new t_point[w * h + 1]();

    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
            float Ix = pixval32f(deriX, x, y);
            float Iy = pixval32f(deriY, x, y);
            float Ixy = pixval32f(deriXY, x, y);

            float det = Ix * Iy - Ixy * Ixy;
            float tr = Ix + Iy;

            float cim = det - k * tr * tr;
            cims[y * w + x] = cim;
            vals[y * w + x] = cim;

//          pts[y * w + x].x = x;
//          pts[y * w + x].y = y;
//          pts[y * w + x].val = cim;
        }
    }

//  cout << "ok\n";
//  //sort(pts, pts + w * h, cmp);
//  qsort(pts, sizeof(t_point), w * h, _cmp);
//  cout << "ok\n";
//  int num = w * h - 1;
//  int feat = 0;
//  int contentSize = filterSize;
//  while (num-- >= 0) {
//      int x = pts[num].x;
//      int y = pts[num].y;
//      if (x < contentSize || x > w - contentSize ||
//          y < contentSize || y > w - contentSize)
//          continue;
//      if (is_extremun(cims, pts[num].x, pts[num].y, w, h, filterSize * 4)) {
//          drawPoint(printImg, pts[num].x, pts[num].y);
//          feat++;
//          if (feat > 500)
//              break;
//      }
//  }

    sort(vals, vals + w * h);
    //float thres = 7000;
    int num =  w * h > 4000 ? 4000 : w * h * 3 / 4;
    float thres = vals[w * h - num];
    t_point *pts = new t_point[4000];
    int count = 0;

    for (int y = filterSize; y < h - filterSize; y++) {
        for (int x = filterSize; x < w- filterSize; x++) {
            if (cims[y * w + x] >= thres && is_extremun(cims, x, y, w, h, filterSize)) {
//                drawPoint(printImg, x, y);
                if (cims[y * w + x] == vals[w * h - 1]) {
                    drawPoint(printImg, x, y);
                }
                pts[count].x = x;
                pts[count++].y = y;
            }
        }
    }

    float **desc;
    int descSize;

    desc = describe_feature(src, pts, count, descSize);

//  cout << "\n\n****************\n";
//  for (int i = 0; i < count; i++) {
//      for (int j = 0; j < descSize; j++) {
//          cout << desc[i][j] << "\t";
//      }
//      cout << endl;
//  }

    /*return the result*/
    *ptsDes = desc;
    *npts = count;
    *ndes = descSize;
    *_pts = pts;

    cvNamedWindow("1", CV_WINDOW_AUTOSIZE);
    cvShowImage("1", printImg);
    cvWaitKey(0);

    delete [] vals;
    delete [] cims;
    cvReleaseImage(&src);
    cvReleaseImage(&deriX);
    cvReleaseImage(&deriY);
    cvReleaseImage(&deriXY);

    return printImg;
}
Example no. 29
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
CvMat *tgso (CvMat &tmap, int ntex, double sigma, double theta, CvMat &tsim, int useChi2) {


	CvMat *roundTmap=cvCreateMat(tmap.rows,tmap.cols,CV_32FC1);
	CvMat *comp=cvCreateMat(tmap.rows,tmap.cols,CV_32FC1);

	for (int i=0;i<tmap.rows;i++)
		for (int j=0;j<tmap.cols;j++)
			cvSetReal2D(roundTmap,i,j,cvRound(cvGetReal2D(&tmap,i,j)));

	cvSub(&tmap,roundTmap,comp);
	if (cvCountNonZero(comp)) {
		printf("texton labels not integral");
		cvReleaseMat(&roundTmap);
		cvReleaseMat(&comp);
		exit(1);
	}

	double min,max;
	cvMinMaxLoc(&tmap,&min,&max);
	if (min<1 || max>ntex) {
		printf("texton labels out of range [1,%d]\n",ntex);
		cvReleaseMat(&roundTmap);
		cvReleaseMat(&comp);
		exit(1);
	}

	cvReleaseMat(&roundTmap);
	cvReleaseMat(&comp);


	double wr=floor(sigma); //sigma=radius (Leo) 

	CvMat *x=cvCreateMat(1,wr-(-wr)+1, CV_64FC1);
	CvMat *y=cvCreateMat(wr-(-wr)+1,1, CV_64FC1);

	CvMat *u=cvCreateMat(wr-(-wr)+1,wr-(-wr)+1, CV_64FC1);
	CvMat *v=cvCreateMat(wr-(-wr)+1,wr-(-wr)+1, CV_64FC1);
	CvMat *gamma=cvCreateMat(u->rows,v->rows, CV_64FC1);

	// Set x,y directions 
	for (int j=-wr;j<=wr;j++) {
		cvSetReal2D(x,0,(j+wr),j);
		cvSetReal2D(y,(j+wr),0,j);
	}

	// Set u,v, meshgrids
	for (int i=0;i<u->rows;i++) {
		cvRepeat(x,u);
		cvRepeat(y,v);
	}

	// Compute the gamma matrix from the grid
	for (int i=0;i<u->rows;i++) 
		for (int j=0;j<u->cols;j++)
			cvSetReal2D(gamma,i,j,atan2(cvGetReal2D(v,i,j),cvGetReal2D(u,i,j)));

	cvReleaseMat(&x);
	cvReleaseMat(&y);

	CvMat *sum=cvCreateMat(u->rows,u->cols, CV_64FC1);
	cvMul(u,u,u);
	cvMul(v,v,v);
	cvAdd(u,v,sum);
	CvMat *mask=cvCreateMat(u->rows,u->cols, CV_8UC1);
	cvCmpS(sum,sigma*sigma,mask,CV_CMP_LE);
	cvConvertScale(mask,mask,1.0/255);
	cvSetReal2D(mask,wr,wr,0);
	int count=cvCountNonZero(mask);

	cvReleaseMat(&u);
	cvReleaseMat(&v);
	cvReleaseMat(&sum);

	CvMat *sub=cvCreateMat(mask->rows,mask->cols, CV_64FC1);
	CvMat *side=cvCreateMat(mask->rows,mask->cols, CV_8UC1);

	cvSubS(gamma,cvScalar(theta),sub);
	cvReleaseMat(&gamma);

	for (int i=0;i<mask->rows;i++){
		for (int j=0;j<mask->cols;j++) {
			double n=cvmGet(sub,i,j);
			double n_mod = n-floor(n/(2*M_PI))*2*M_PI;
			cvSetReal2D(side,i,j, 1 + int(n_mod < M_PI));
		}
	}

	cvMul(side,mask,side);
	cvReleaseMat(&sub);
	cvReleaseMat(&mask);

	CvMat *lmask=cvCreateMat(side->rows,side->cols, CV_8UC1);
	CvMat *rmask=cvCreateMat(side->rows,side->cols, CV_8UC1);
	cvCmpS(side,1,lmask,CV_CMP_EQ);
	cvCmpS(side,2,rmask,CV_CMP_EQ);
	int count1=cvCountNonZero(lmask), count2=cvCountNonZero(rmask);
	if (count1 != count2) {
		printf("Bug: imbalance\n");
	}

	CvMat *rlmask=cvCreateMat(side->rows,side->cols, CV_32FC1);
	CvMat *rrmask=cvCreateMat(side->rows,side->cols, CV_32FC1);
	cvConvertScale(lmask,rlmask,1.0/(255*count)*2);
	cvConvertScale(rmask,rrmask,1.0/(255*count)*2);


	cvReleaseMat(&lmask);
	cvReleaseMat(&rmask);
	cvReleaseMat(&side);

	int h=tmap.rows;
	int w=tmap.cols;


	CvMat *d       = cvCreateMat(h*w,ntex,CV_32FC1);
	CvMat *coltemp = cvCreateMat(h*w,1,CV_32FC1);
	CvMat *tgL     = cvCreateMat(h,w, CV_32FC1);
	CvMat *tgR     = cvCreateMat(h,w, CV_32FC1);
	CvMat *temp    = cvCreateMat(h,w,CV_8UC1);
	CvMat *im      = cvCreateMat(h,w, CV_32FC1);
	CvMat *sub2    = cvCreateMat(h,w,CV_32FC1);
	CvMat *sub2t   = cvCreateMat(w,h,CV_32FC1);
	CvMat *prod    = cvCreateMat(h*w,ntex,CV_32FC1);
	CvMat reshapehdr,*reshape;

	CvMat* tgL_pad = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1);
	CvMat* tgR_pad = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1);
	CvMat* im_pad  = cvCreateMat(h+rlmask->rows-1,w+rlmask->cols-1,CV_32FC1);

	CvMat *tg=cvCreateMat(h,w,CV_32FC1);
	cvZero(tg);
	
	if (useChi2 == 1){
		CvMat* temp_add1 = cvCreateMat(h,w,CV_32FC1);
		for (int i=0;i<ntex;i++) {
			cvCmpS(&tmap,i+1,temp,CV_CMP_EQ); 
			cvConvertScale(temp,im,1.0/255);

			cvCopyMakeBorder(tgL,tgL_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(tgR,tgR_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(im,im_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);

			cvFilter2D(im_pad,tgL_pad,rlmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));
			cvFilter2D(im_pad,tgR_pad,rrmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));

			cvGetSubRect(tgL_pad,tgL,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgL->cols,tgL->rows));
			cvGetSubRect(tgR_pad,tgR,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgR->cols,tgR->rows));

			cvSub(tgL,tgR,sub2);
			cvPow(sub2,sub2,2.0);
			cvAdd(tgL,tgR,temp_add1);
			cvAddS(temp_add1,cvScalar(0.0000000001),temp_add1);
			cvDiv(sub2,temp_add1,sub2);
			cvAdd(tg,sub2,tg);
		}
		cvScale(tg,tg,0.5);

		cvReleaseMat(&temp_add1);

	}
	else{// if not chi^2
		for (int i=0;i<ntex;i++) {
			cvCmpS(&tmap,i+1,temp,CV_CMP_EQ); 
			cvConvertScale(temp,im,1.0/255);

			cvCopyMakeBorder(tgL,tgL_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(tgR,tgR_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);
			cvCopyMakeBorder(im,im_pad,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2),IPL_BORDER_CONSTANT);

			cvFilter2D(im_pad,tgL_pad,rlmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));
			cvFilter2D(im_pad,tgR_pad,rrmask,cvPoint((rlmask->cols-1)/2,(rlmask->rows-1)/2));

			cvGetSubRect(tgL_pad,tgL,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgL->cols,tgL->rows));
			cvGetSubRect(tgR_pad,tgR,cvRect((rlmask->cols-1)/2,(rlmask->rows-1)/2,tgR->cols,tgR->rows));

			cvSub(tgL,tgR,sub2);
			cvAbs(sub2,sub2);
			cvTranspose(sub2,sub2t);
			reshape=cvReshape(sub2t,&reshapehdr,0,h*w);
			cvGetCol(d,coltemp,i);
			cvCopy(reshape,coltemp);
		}

		cvMatMul(d,&tsim,prod);
		cvMul(prod,d,prod);


		CvMat *sumcols=cvCreateMat(h*w,1,CV_32FC1);
		cvSetZero(sumcols);
		for (int i=0;i<prod->cols;i++) {
			cvGetCol(prod,coltemp,i);
			cvAdd(sumcols,coltemp,sumcols);
		}

		reshape=cvReshape(sumcols,&reshapehdr,0,w);
		cvTranspose(reshape,tg);

		cvReleaseMat(&sumcols);
	}


	//Smooth the gradient now!!
	tg=fitparab(*tg,sigma,sigma/4,theta);
	cvMaxS(tg,0,tg); 

	
	cvReleaseMat(&im_pad);
	cvReleaseMat(&tgL_pad);
	cvReleaseMat(&tgR_pad);
	cvReleaseMat(&rlmask);
	cvReleaseMat(&rrmask);
	cvReleaseMat(&im);
	cvReleaseMat(&tgL);
	cvReleaseMat(&tgR);
	cvReleaseMat(&temp);
	cvReleaseMat(&coltemp);
	cvReleaseMat(&sub2);
	cvReleaseMat(&sub2t);
	cvReleaseMat(&d);
	cvReleaseMat(&prod);

	return tg;

}
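In outline, tgso computes an oriented texture gradient from a texton label map: rlmask and rrmask are the two normalized half-discs of radius sigma at orientation theta, tgL and tgR become per-texton half-disc histograms obtained by filtering the indicator image of each label, and tg is either the chi-squared histogram distance

tg = \tfrac12\sum_{i=1}^{ntex}\frac{(tgL_i - tgR_i)^2}{tgL_i + tgR_i + \varepsilon}

(the useChi2 == 1 branch) or the similarity-weighted distance d^{\top}\,tsim\,d with d_i = |tgL_i - tgR_i| (the else branch), smoothed afterwards by fitparab. This mirrors the texture-gradient stage of Pb-style boundary detectors.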
Example no. 30
static GstFlowReturn gst_gcs_transform_ip(GstBaseTransform * btrans, GstBuffer * gstbuf) 
{
  GstGcs *gcs = GST_GCS (btrans);

  GST_GCS_LOCK (gcs);

  //////////////////////////////////////////////////////////////////////////////
  // get image data from the input, which is RGBA or BGRA
  gcs->pImageRGBA->imageData = (char*)GST_BUFFER_DATA(gstbuf);
  cvSplit(gcs->pImageRGBA,   gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, gcs->pImgChX );
  cvCvtColor(gcs->pImageRGBA,  gcs->pImgRGB, CV_BGRA2BGR);


  //////////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////MOTION CUES INTEGR////
  //////////////////////////////////////////////////////////////////////////////

  //////////////////////////////////////////////////////////////////////////////
  // apply step 1. filtering using bilateral filter. Cannot happen in-place => scratch
  cvSmooth(gcs->pImgRGB, gcs->pImgScratch, CV_BILATERAL, 3, 50, 3, 0);
  // create GRAY image
  cvCvtColor(gcs->pImgScratch, gcs->pImgGRAY, CV_BGR2GRAY);

  // Frame difference the GRAY and the previous one
  // not intuitive: the frames are smoothed first, then differenced
  cvCopy( gcs->pImgGRAY,   gcs->pImgGRAY_copy,  NULL);
  cvCopy( gcs->pImgGRAY_1, gcs->pImgGRAY_1copy, NULL);
  get_frame_difference( gcs->pImgGRAY_copy, gcs->pImgGRAY_1copy, gcs->pImgGRAY_diff);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);
  cvDilate( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);


  //////////////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////
  // ghost mapping
  gcs->dstTri[0].x = gcs->facepos.x - gcs->facepos.width/2 ;
  gcs->dstTri[0].y = gcs->facepos.y - gcs->facepos.height/2;
  gcs->dstTri[1].x = gcs->facepos.x - gcs->facepos.width/2;
  gcs->dstTri[1].y = gcs->facepos.y + gcs->facepos.height/2;
  gcs->dstTri[2].x = gcs->facepos.x + gcs->facepos.width/2;
  gcs->dstTri[2].y = gcs->facepos.y + gcs->facepos.height/2;

  if( gcs->ghostfilename){
    cvGetAffineTransform( gcs->srcTri, gcs->dstTri, gcs->warp_mat );
    cvWarpAffine( gcs->cvGhostBwResized, gcs->cvGhostBwAffined, gcs->warp_mat );
  }




  //////////////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////
  // GrabCut algorithm preparation and running

  gcs->facepos.x = gcs->facepos.x - gcs->facepos.width/2;
  gcs->facepos.y = gcs->facepos.y - gcs->facepos.height/2;

  // create an IplImage  with the skin colour pixels as 255
  compose_skin_matrix(gcs->pImgRGB, gcs->pImg_skin);
  // And the skin pixels with the movement mask
  cvAnd( gcs->pImg_skin,  gcs->pImgGRAY_diff,  gcs->pImgGRAY_diff);
  //cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5, 5, 3, 3, CV_SHAPE_RECT,NULL), 1);
  cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7,7, 5,5, CV_SHAPE_RECT,NULL), 2);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5,5, 3,3, CV_SHAPE_RECT,NULL), 2);

  // if the incoming alpha is all 1's, ignore it: guards against the case where no vibe element ran before us
  if((0.75*(gcs->width * gcs->height) <= cvCountNonZero(gcs->pImgChX)))
    cvZero(gcs->pImgChX);
  // OR the input Alpha
  cvOr( gcs->pImgChX,  gcs->pImgGRAY_diff,  gcs->pImgGRAY_diff);


  //////////////////////////////////////////////////////////////////////////////
  // try to consolidate a single mask from all the sub-patches
  cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7,7, 5,5, CV_SHAPE_RECT,NULL), 3);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5,5, 3,3, CV_SHAPE_RECT,NULL), 4);

  //////////////////////////////////////////////////////////////////////////////
  // use either Ghost or boxes-model to create a PR foreground starting point in gcs->grabcut_mask
  if( gcs->ghostfilename)
    compose_grabcut_seedmatrix3(gcs->grabcut_mask, gcs->cvGhostBwAffined, gcs->pImgGRAY_diff  );
  else{
    // toss it all to the bbox creation function, together with the face position and size
    compose_grabcut_seedmatrix2(gcs->grabcut_mask, gcs->facepos, gcs->pImgGRAY_diff, gcs->facefound );
  }


  //////////////////////////////////////////////////////////////////////////////
#ifdef KMEANS
  gcs->num_clusters = 18; // keep it even to simplify integer arithmetics
  cvCopy(gcs->pImgRGB, gcs->pImgRGB_kmeans, NULL);
  posterize_image(gcs->pImgRGB_kmeans);
  create_kmeans_clusters(gcs->pImgRGB_kmeans, gcs->kmeans_points, gcs->kmeans_clusters, 
                         gcs->num_clusters, gcs->num_samples);
  adjust_bodybbox_w_clusters(gcs->grabcut_mask, gcs->pImgRGB_kmeans, gcs->num_clusters, gcs->facepos);
#endif //KMEANS


  //////////////////////////////////////////////////////////////////////////////
  if( gcs->debug < 70)
    run_graphcut_iteration( &(gcs->GC), gcs->pImgRGB, gcs->grabcut_mask, &gcs->bbox_prev);



  // get a copy of GRAY for the next iteration
  cvCopy(gcs->pImgGRAY, gcs->pImgGRAY_1, NULL);

  //////////////////////////////////////////////////////////////////////////////
  // if we want to display, just overwrite the output
  if( gcs->display ){
    int outputimage = gcs->debug;
    switch( outputimage ){
    case 1: // output the GRAY difference
      cvCvtColor( gcs->pImgGRAY_diff, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 50:// Ghost remapped
      cvCvtColor( gcs->cvGhostBwAffined, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 51:// Ghost applied
      cvAnd( gcs->cvGhostBwAffined, gcs->pImgGRAY, gcs->pImgGRAY, NULL );
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 60:// Graphcut
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG
      cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 127.0);
      cvCvtColor( gcs->grabcut_mask, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 61:// Graphcut applied on input/output image
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG, PR_FG
      cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 255.0);
      cvAnd( gcs->grabcut_mask,  gcs->pImgGRAY,  gcs->pImgGRAY, NULL);
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );

      cvRectangle(gcs->pImgRGB, cvPoint(gcs->bbox_now.x, gcs->bbox_now.y), 
                  cvPoint(gcs->bbox_now.x + gcs->bbox_now.width, gcs->bbox_now.y+gcs->bbox_now.height),
                  cvScalar(127,0.0), 1, 8, 0 );
     break;
    case 70:// bboxes
      cvZero( gcs->pImgGRAY );
      cvMul( gcs->grabcut_mask,  gcs->grabcut_mask,  gcs->pImgGRAY, 40.0 );
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 71:// bboxes applied on the original image
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG, PR_FG
      cvMul( gcs->grabcut_mask,  gcs->pImgGRAY,  gcs->pImgGRAY, 1.0 );
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 72: // input alpha channel mapped to output
      cvCvtColor( gcs->pImgChX, gcs->pImgRGB, CV_GRAY2BGR );
      break;
#ifdef KMEANS
    case 80:// k-means output
      cvCopy(gcs->pImgRGB_kmeans, gcs->pImgRGB, NULL);
      break;
    case 81:// k-means output filtered with bbox/ghost mask
      cvSplit(gcs->pImgRGB_kmeans, gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL        );
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get FG and PR_FG
      cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 255.0);     // scale any to 255.

      cvAnd( gcs->grabcut_mask,  gcs->pImgCh1,  gcs->pImgCh1, NULL );
      cvAnd( gcs->grabcut_mask,  gcs->pImgCh2,  gcs->pImgCh2, NULL );
      cvAnd( gcs->grabcut_mask,  gcs->pImgCh3,  gcs->pImgCh3, NULL );

      cvMerge(              gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL, gcs->pImgRGB);
      break;
#endif //KMEANS
    default:
      break;
    }
  }

  //////////////////////////////////////////////////////////////////////////////
  // copy anyhow the fg/bg to the alpha channel in the output image alpha ch
  cvSplit(gcs->pImgRGB, gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL        );
  cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG and possible FG
  cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 255.0);
  gcs->pImgChA->imageData = (char*)gcs->grabcut_mask->data.ptr;

  cvMerge(              gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, gcs->pImgChA, gcs->pImageRGBA);

  gcs->numframes++;

  GST_GCS_UNLOCK (gcs);  
  
  return GST_FLOW_OK;
}