예제 #1
0
void blur_function(const IplImage *latent_image, IplImage *blur_image, const CvMat *hom1, const CvMat *hom2)
{
	const int T = 20;
	const int tau = 10;
	CvMat *id_mat = cvCreateMat(3, 3, CV_32FC1);
	cvSetIdentity(id_mat, cvRealScalar(1));
	CvMat *invhom1 = cvCreateMat(3, 3, CV_32FC1);
	cvInvert(hom1, invhom1, CV_LU);
	
	CvMat *h1 = cvCreateMat(3, 3, CV_32FC1);
	CvMat *h2 = cvCreateMat(3, 3, CV_32FC1);
	CvSize size = cvSize(latent_image->width, latent_image->height);
	IplImage *temp = cvCreateImage(size, latent_image->depth, latent_image->nChannels);
	IplImage *blur = cvCreateImage(size, IPL_DEPTH_32F, latent_image->nChannels);
	cvSetZero(blur);
	
	for (int i = 1; i <= tau; ++i)
	{
		cvAddWeighted(id_mat, (double)(T-i)/T, invhom1, (double)i/T, 0, h1);
		cvAddWeighted(id_mat, (double)(T-i)/T, hom2, (double)i/T, 0, h2);
		cvWarpPerspective(latent_image, temp, h1, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
		cvAdd(blur, temp, blur, NULL);
		cvWarpPerspective(latent_image, temp, h2, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
		cvAdd(blur, temp, blur, NULL);
	}
	cvAdd(blur, latent_image, blur, NULL);
	cvConvertScale(blur, blur_image, 1.0/(2*tau+1), 0);
	
	cvReleaseMat(&id_mat);
	cvReleaseMat(&invhom1);
	cvReleaseMat(&h1);
	cvReleaseMat(&h2);
	cvReleaseImage(&temp);
	cvReleaseImage(&blur);
}
예제 #2
0
// Computes a gradient-magnitude image from a 3-channel input, combining the
// gradients of plane 1 (treated as S) and plane 0 (treated as H).
// NOTE(review): despite the HSV name, no RGB->HSV conversion happens here;
// presumably the input already holds HSV data in an eRGB24 container —
// confirm against callers.
// Silently returns (no-op) if sizes or pixel types don't match.
void ImageProcessorCV::CalculateGradientImageHSV(CByteImage *pInputImage, CByteImage *pOutputImage)
{
	// Validate dimensions and pixel types; bail out silently on mismatch.
	if (pInputImage->width != pOutputImage->width || pInputImage->height != pOutputImage->height ||
		pInputImage->type != CByteImage::eRGB24 || pOutputImage->type != CByteImage::eGrayScale)
		return;

	// Wrap the CByteImages as IplImage headers (no pixel copy).
	IplImage *pIplInputImage = IplImageAdaptor::Adapt(pInputImage);
	IplImage *pIplOutputImage = IplImageAdaptor::Adapt(pOutputImage);

	// Determine Gradient Image by Irina Wchter
	// instead of normal norm sqrt(x*x +y*y) use |x|+|y| because it is much faster
	IplImage *singleChannel0 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel1 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel2 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	// 16-bit signed buffer for raw Sobel responses; 8-bit buffer for their abs.
	IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
	IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);

	// Split the interleaved input into its three planes.
	cvCvtPixToPlane(pIplInputImage, singleChannel0, singleChannel1, singleChannel2, NULL);
	
	// calculate gradients on S-channel (plane 1): |dx| + |dy|
	//cvSmooth(singleChannel1, singleChannel1, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel1, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel1, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, pIplOutputImage);
	cvAdd(abs, pIplOutputImage, pIplOutputImage, 0);
	
	// threshold S-channel for creating a mask for gradients of H-channel
	cvThreshold(singleChannel1, singleChannel1, 60, 255, CV_THRESH_BINARY);
	cvDilate(singleChannel1, singleChannel1);
	
	// calculate gradients on H-channel (plane 0): |dx| + |dy|
	//cvSmooth(singleChannel0, singleChannel0, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel0, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel0, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, singleChannel0);
	cvAdd(abs, singleChannel0, singleChannel0, 0);
	
	// filter gradients of H-channel with the mask (keep H gradients only
	// where the thresholded plane-1 value is high)
	cvAnd(singleChannel0, singleChannel1, singleChannel0);
	
	// combine the two gradient images (per-pixel maximum)
	cvMax(pIplOutputImage, singleChannel0, pIplOutputImage);
	
	// free memory
	cvReleaseImage(&singleChannel0);
	cvReleaseImage(&singleChannel1);
	cvReleaseImage(&singleChannel2);
	cvReleaseImage(&diff);
	cvReleaseImage(&abs);
	
	// release only the adapter headers, not the underlying pixel data
	cvReleaseImageHeader(&pIplInputImage);
	cvReleaseImageHeader(&pIplOutputImage);
}
// Renders one frame of the game into fr: pause button, user and computer
// handles, the ball (or an expanding "explosion" ring when it leaves the
// field vertically), a motion-blur composite with the previous frame fr0,
// and the score overlays. Relies on file-scope state: r0/r1/r2, upc/cpc/bpc,
// pbuttonp, buttonr, winx/winy, bound, criticr, explosr, color constants,
// coo2pix(), sqr(), plot_circular_button(), scoreroi1/2, scoretext1/2.
void easyplot(IplImage *fr, IplImage *fr0)
{
	// Ring radius/thickness for the handles. NOTE(review): 0.5*(r1+r2) is
	// truncated to int here — presumably intended.
	int rmean = 0.5*(r1+r2), rthick = r1-r2;
	CvPoint up, cp, bp;
	
	// User handle position in pixel coordinates.
	up.x = coo2pix(upc.x);
	up.y = coo2pix(upc.y);
	
	// pause button: highlight when the user handle overlaps it
	if(sqr(pbuttonp.x-up.x)+sqr(pbuttonp.y-up.y)<sqr(r1+buttonr)) {
		plot_circular_button(fr, yellow);
	}
	
	// user handle: red ring with a thinner white ring inside
	cvCircle(fr, up, rmean,   red, rthick+2, CV_AA, 0);
	cvCircle(fr, up, rmean, white, rthick-4, CV_AA, 0);
	
	// computer handle: green ring with a thinner white ring inside
	cp.x = coo2pix(cpc.x);
	cp.y = coo2pix(cpc.y);
	cvCircle(fr, cp, rmean, green, rthick+2, CV_AA, 0);
	cvCircle(fr, cp, rmean, white, rthick-4, CV_AA, 0);
	
	// ball: explosion effect if it left through the bottom or top edge,
	// otherwise draw the ball itself
	bp.x = coo2pix(bpc.x);
	bp.y = coo2pix(bpc.y);
	if(bp.y>winy+r0) {
		cvCircle(fr, cvPoint(winx/2,winy-bound), criticr, CV_RGB(150,150,150), 10, CV_AA, 0);
		cvCircle(fr, cvPoint(winx/2,winy-bound), explosr, CV_RGB(150,150,150), criticr-explosr, CV_AA, 0);
		explosr+=7; // grow the explosion ring for the next frame
	}
	else if(bp.y<-r0) {
		cvCircle(fr, cvPoint(winx/2,bound), criticr, CV_RGB(150,150,150), 10, CV_AA, 0);
		cvCircle(fr, cvPoint(winx/2,bound), explosr, CV_RGB(150,150,150), criticr-explosr, CV_AA, 0);
		explosr+=7; // grow the explosion ring for the next frame
	}
	else {
		cvCircle(fr, bp, r0,  white, -1, CV_AA, 0);
		cvCircle(fr, bp, r0,   blue, 3, CV_AA, 0);
	}
	
	// blur processing: blend the blurred current frame with the previous
	// frame for a motion-trail effect
	cvSmooth(fr, fr, CV_BLUR, 15, 15, 0.0, 0.0);
	cvAddWeighted(fr0, 0.55, fr, 1.0, -10.0, fr);
	
	// score: additively blit the score textures into their ROIs
	cvSetImageROI(fr, scoreroi1);
	cvAdd(fr, scoretext1, fr);
	cvSetImageROI(fr, scoreroi2);
	cvAdd(fr, scoretext2, fr);
	cvResetImageROI(fr);
	cvSmooth(fr, fr, CV_BLUR, 5, 5, 0.0, 0.0);
	
	// keep this frame as the "previous frame" for the next call
	cvCopy(fr, fr0);
}
예제 #4
0
파일: main.cpp 프로젝트: fxia22/tinker
/*
 * Stacks two images vertically (img1 on top, img2 below) into a newly
 * allocated 8-bit, 3-channel image. The caller owns the returned image.
 */
IplImage* stack_imgs( IplImage* img1, IplImage* img2 )
{
	CvSize canvas_size = cvSize( MAX(img1->width, img2->width),
	                             img1->height + img2->height );
	IplImage* canvas = cvCreateImage( canvas_size, IPL_DEPTH_8U, 3 );
	cvZero( canvas );

	// Adding each source onto a zeroed ROI copies it into its slot.
	cvSetImageROI( canvas, cvRect( 0, 0, img1->width, img1->height ) );
	cvAdd( img1, canvas, canvas, NULL );
	cvSetImageROI( canvas, cvRect( 0, img1->height, img2->width, img2->height ) );
	cvAdd( img2, canvas, canvas, NULL );
	cvResetImageROI( canvas );

	return canvas;
}
// Adapts the tracked template: when tracking reliability exceeds the
// threshold, blend the template toward the image patch at the winner
// position; otherwise decay it back toward the original template.
void CueTemplate::adapt() {
	if(!m_init) return;

	CVImage* cvgrayimg = cvGrayImageIn.getBuffer();
	if(!cvgrayimg) { std::cerr<< getName() << "::ERROR::execute()::cvGrayImageIn is NULL!...\n"; return; }
	IplImage* grayimg = cvgrayimg->ipl;

	TrackData* track = trackIn.getBuffer();
//	if(!track){ std::cerr<< getName() << "::ERROR::execute()::trackIn is NULL!...\n"; return; }	

	// Reliability and winner position come from the tracker when present,
	// otherwise from the global maximum of the bounded output map.
	float rel;
	CvPoint winner;
	if(track) {
		rel = track->reliability;
		winner = track->winnerPos;
	}
	else{
		double min, max;
		CvPoint minLoc, maxLoc;
		cvMinMaxLoc(mp_boundedoutputimg, &min, &max, &minLoc, &maxLoc, NULL);
		rel = (float)max;
		winner = maxLoc;
	}

	if(rel > m_threshold){
		// adapt toward new template
		// Clamp the winner so the template window stays inside the image.
		int x = winner.x;
		int y = winner.y;
		if(x < m_halftemplatesizex) x = m_halftemplatesizex;
		if(y < m_halftemplatesizey) y = m_halftemplatesizey;
		if(x >= grayimg->width - m_halftemplatesizex) x = grayimg->width - m_halftemplatesizex-1;
		if(y >= grayimg->height - m_halftemplatesizey) y = grayimg->height - m_halftemplatesizey-1;
		CvRect rect;
		rect.x = x - m_halftemplatesizex;
		rect.y = y - m_halftemplatesizey;
		rect.width = m_templatesizex;
		rect.height = m_templatesizey;
		// Exponential blend: template = (1 - m_tfacs)*template + m_tfacs*patch.
		cvSetImageROI(grayimg, rect );
		cvCopy( grayimg, mp_newtemplateimg );
		cvScale( mp_templateimg, mp_templateimg, 1.0 - m_tfacs);
		cvScale( mp_newtemplateimg, mp_newtemplateimg, m_tfacs);
		cvAdd( mp_newtemplateimg, mp_templateimg, mp_templateimg );
		cvResetImageROI( grayimg );
		cvTemplateImageOut.out();
	}
	else{
		// adapting back to the original template (blend rate scaled by 1/m_back)
		cvScale( mp_templateimg, mp_templateimg, 1.0 - (m_tfacs/m_back) );
		cvScale( mp_origtemplateimg, mp_temptemplateimg, (m_tfacs/m_back) );
		cvAdd( mp_temptemplateimg, mp_templateimg, mp_templateimg );
		cvTemplateImageOut.out();
	}
}
예제 #6
0
파일: utils.cpp 프로젝트: cherip/dct
// Places img1 and img2 side by side on a fresh 8-bit, 3-channel canvas.
// The caller is responsible for releasing the returned image.
IplImage *stack(IplImage *img1, IplImage *img2) {
    CvSize canvas_size = cvSize(img1->width + img2->width,
                                _max(img1->height, img2->height));
    IplImage *canvas = cvCreateImage(canvas_size, IPL_DEPTH_8U, 3);
    cvZero(canvas);

    // Adding onto a zeroed ROI copies each source into its slot.
    cvSetImageROI(canvas, cvRect(0, 0, img1->width, img1->height));
    cvAdd(img1, canvas, canvas, NULL);
    cvSetImageROI(canvas, cvRect(img1->width, 0, img2->width, img2->height));
    cvAdd(img2, canvas, canvas, NULL);
    cvResetImageROI(canvas);

    return canvas;
}
예제 #7
0
/*
 * Composes two images into one, placed horizontally:
 * img1 goes on the left, img2 on the right.
 * Returns a newly allocated 8-bit 3-channel image owned by the caller.
 */
extern IplImage* stack_imgs_horizontal( IplImage* img1, IplImage* img2 )
{
    // Canvas wide enough for both images, tall enough for the taller one.
    IplImage* composite = cvCreateImage(cvSize(img1->width + img2->width,
                                               MAX(img1->height, img2->height)),
                                        IPL_DEPTH_8U, 3);
    cvZero(composite);

    // Blit the left image, then the right one, via ROI + add-onto-zero.
    cvSetImageROI(composite, cvRect(0, 0, img1->width, img1->height));
    cvAdd(img1, composite, composite, NULL);
    cvSetImageROI(composite, cvRect(img1->width, 0, img2->width, img2->height));
    cvAdd(img2, composite, composite, NULL);
    cvResetImageROI(composite);

    return composite;
}
예제 #8
0
파일: utils.cpp 프로젝트: cherip/dct
/* Joins img1 (left) and img2 (right) into one freshly allocated
   8-bit 3-channel image; the caller must release the result. */
IplImage* stack_imgs(const IplImage* img1, const IplImage* img2 )
{
  CvSize sz = cvSize( img1->width + img2->width,
                      MAX(img1->height, img2->height) );
  IplImage* out = cvCreateImage( sz, IPL_DEPTH_8U, 3 );
  cvZero( out );

  /* adding each source onto a zeroed region copies it into place */
  cvSetImageROI( out, cvRect( 0, 0, img1->width, img1->height ) );
  cvAdd( img1, out, out, NULL );
  cvSetImageROI( out, cvRect( img1->width, 0, img2->width, img2->height ) );
  cvAdd( img2, out, out, NULL );
  cvResetImageROI( out );

  return out;
}
예제 #9
0
파일: Segment.cpp 프로젝트: gimlids/LTPM
// Merges two segments into a newly allocated Segment: masks are combined
// (per-pixel add), the color is a pixel-count-weighted average, and the
// pixel lists are concatenated. Returns the new segment; caller owns it.
// WARNING: releases s1->iplMask as a side effect (cvReleaseImage NULLs the
// pointer) — s1 must not be used for mask operations afterwards.
Segment* Segment::combine(Segment *s1, Segment *s2)
{
	// New segment starts from s1's patch and label.
	Segment* newSegment = new Segment(*s1->seg, s1->label);

	// Combined mask = s1 mask + s2 mask.
	IplImage* maskAddition = cvCreateImage(cvSize(s1->seg->width, s1->seg->height), IPL_DEPTH_8U, 1);
	cvAdd(s1->iplMask, s2->iplMask, maskAddition, NULL);
	cvReleaseImage(&s1->iplMask);
	newSegment->iplMask = maskAddition;
	newSegment->mask = BwImage(newSegment->iplMask);
	
	// weighted average colors
	int s1PixelCount = s1->pixels.size();
	int s2PixelCount = s2->pixels.size();
	int totalPixelCount = s1PixelCount + s2PixelCount;
	float s1Weight = float(s1PixelCount) / float(totalPixelCount);
	float s2Weight = float(s2PixelCount) / float(totalPixelCount);
	newSegment->color.r = s1->color.r * s1Weight + s2->color.r * s2Weight;
	newSegment->color.g = s1->color.g * s1Weight + s2->color.g * s2Weight;
	newSegment->color.b = s1->color.b * s1Weight + s2->color.b * s2Weight;

	// Combined segment holds the union of both pixel lists.
	newSegment->pixels.insert(newSegment->pixels.end(), s1->pixels.begin(), s1->pixels.end());
	newSegment->pixels.insert(newSegment->pixels.end(), s2->pixels.begin(), s2->pixels.end());

	newSegment->updateContour();

	return newSegment;
}
/* Tool function */
// Reads frames from the capture until mFrameNumber frames have been
// grabbed (or the stream ends / a key is pressed). Each frame is converted
// to grayscale into mpFrame[mCount] and accumulated into mSum.
void motionDetection::accFrameFromVideo(CvCapture* capture){

	//cvNamedWindow( "Video", CV_WINDOW_AUTOSIZE ); // Create a window to display the video 

	while (mCount != mFrameNumber)
	{
		if (cvGrabFrame(capture))
		{
			// Decode the grabbed frame (buffer is owned by the capture).
			mFrame = cvRetrieveFrame(capture);
			// convert rgb to gray 
			cvCvtColor(mFrame, mpFrame[mCount], CV_BGR2GRAY);
			// accumulate each frame
			cvAdd(mSum, mpFrame[mCount], mSum);
			//cvShowImage( "Video", mpFrame[mCount] );  // display current frame 

			++mCount;
			// Allow early abort on any key press.
			if (cvWaitKey(10) >= 0) {
				break;
			}
		}
		else {
			// No more frames available.
			break;
		}
	}
	//cvDestroyWindow( "Video" );
}
예제 #11
0
// Applies the Gabor filter pair (real + imaginary kernels) for the given
// frequency/orientation and returns the magnitude response
// sqrt(real^2 + imag^2) as a newly allocated image owned by the caller.
Img GaborImage::GaborTransform(Img Image, int Frequency, int Orientation) {
	orientation = Orientation;
	CalculateKernel(Orientation, Frequency);

	Img retImg  = (IplImage*) cvClone(Image);

	// Responses of the real and imaginary Gabor kernels.
	Img gabor_real = (IplImage*) cvClone(Image);
	Img gabor_img  = (IplImage*) cvClone(Image);
	cvFilter2D(Image, gabor_real, KernelRealData);	//image.Convolution(this.KernelRealData);
	cvFilter2D(Image, gabor_img , KernelImgData);	//image.Convolution(this.KernelImgData);

	// Squared magnitude: real^2 + imag^2.
	cvPow(gabor_real, gabor_real, 2);
	cvPow(gabor_img,  gabor_img,  2);

	// Img gabor = (gabor_real + gabor_img).Pow(0.5);
	cvAdd(gabor_real, gabor_img, retImg);

	// Element-wise sqrt via the C++ API.
	cv::Mat in = retImg;
	cv::Mat out;
	cv::sqrt(in, out); 

	IplImage dst_img = out;	

	cvReleaseImage(&gabor_real);
	cvReleaseImage(&gabor_img);

	// BUG FIX: the original reassigned retImg to a fresh clone without
	// releasing the image it already owned, leaking one image per call.
	cvReleaseImage(&retImg);
	retImg = (IplImage*) cvClone(&dst_img);

	return retImg;
}
/* Standard Deviation */
// Computes the per-pixel sample standard deviation of the mFrameNumber
// stored frames relative to the background model and returns it as the
// member 8-bit image m_imgStandardDeviation (not owned by the caller).
IplImage* motionDetection::getStandardDeviationFrame(void) {

	// Accumulate squared differences: mSum += (frame[i] - background)^2.
	cvZero(mSum);
	for (int i = 0; i < mFrameNumber; ++i) {
		// mTmp8U <= | frame[i] - Background Model |
		cvAbsDiff(mpFrame[i], m_imgBackgroundModel, mTmp8U);
		// widen uchar -> float so squaring cannot saturate
		cvConvert(mTmp8U, mTmp);
		// mTmp = mTmp * mTmp
		cvPow(mTmp, mTmp, 2.0);
		// mSum += mTmp
		cvAdd(mSum, mTmp, mSum);
	}

	// Sample variance: mTmp <= mSum / (mFrameNumber - 1).
	// (cvConvertScale replaces the original hand-written per-pixel loop —
	// same arithmetic, done by the library.)
	cvConvertScale(mSum, mTmp, 1.0 / (mFrameNumber - 1), 0);

	// standard deviation = sqrt(variance)
	cvPow(mTmp, mTmp, 0.5);

	// float -> uchar for the returned 8-bit image
	cvConvert(mTmp, m_imgStandardDeviation);

	return m_imgStandardDeviation;
}
//get the stereo pair and create the anaglyph
// Builds a red/cyan anaglyph from a stereo pair. Both input frames are
// converted to 3-channel grayscale IN PLACE and then have channels
// stripped (right keeps red only, left keeps green+blue); the combined
// result is written to *anaglyph, which the caller must pre-allocate.
void createAnaglyph(IplImage *frameL, IplImage *frameR, IplImage **anaglyph){
    IplImage *grayR, *grayL;
    
    CvSize size = cvGetSize(frameL);
    grayR = cvCreateImage(size, frameL->depth, 1);
    grayL = cvCreateImage(size, frameL->depth, 1);
    
    //convert images to grayscale
    cvCvtColor(frameR, grayR, CV_BGR2GRAY);
    cvCvtColor(frameL, grayL, CV_BGR2GRAY);
    
    //revert to RGB (grayscale with 3 channels, all have the same pixel value)
    cvCvtColor(grayR, frameR, CV_GRAY2BGR);
    cvCvtColor(grayL ,frameL, CV_GRAY2BGR);
    
    //remove channels
    for(int row = 0; row < frameL->height; row++){
            //set pointer to the correct position in each row
            uchar* ptrR = (uchar*)(frameR->imageData + row * frameR->widthStep);
            uchar* ptrL = (uchar*)(frameL->imageData + row * frameL->widthStep);
            
            for(int col = 0; col < frameL->width; col++){
                    //remove blue and green channel from the right image
                    ptrR[3*col] = 0;
                    ptrR[3*col+1] = 0;
                    //remove red channel from the left image
                    ptrL[3*col+2] = 0;
            }            
    }
    
    //junctions images
    cvAdd(frameR, frameL, *anaglyph);

    // BUG FIX: the temporary grayscale images were previously leaked.
    cvReleaseImage(&grayR);
    cvReleaseImage(&grayL);
}
//get the stereo pair and create the anaglyph
// Variant that writes the result to disk ("anaglyph.bmp") instead of
// returning it. Strips channels from both frames IN PLACE (right keeps
// red only, left keeps green+blue), sums them, and saves the result.
void createAnaglyph(IplImage *frameL, IplImage *frameR){
    IplImage *anaglyph;

    //remove channels
    for(int row = 0; row < frameL->height; row++){
            //set pointer to the correct position in each row
            uchar* ptrR = (uchar*)(frameR->imageData + row * frameR->widthStep);
            uchar* ptrL = (uchar*)(frameL->imageData + row * frameL->widthStep);

            for(int col = 0; col < frameL->width; col++){
                    //remove blue and green channel from the right image
                    ptrR[3*col] = 0;
                    ptrR[3*col+1] = 0;
                    //remove red channel from the left image
                    ptrL[3*col+2] = 0;

                    /*//uncomment to obtain the green-magenta anaglyph
                    ptrR[3*col] = 0;
                    ptrR[3*col+2] = 0;
                    ptrL[3*col+1] = 0;*/
            }
    }

    //prepare anaglyph image
    CvSize size = cvGetSize(frameL);
    anaglyph = cvCreateImage(size, frameL->depth, frameL->nChannels);
    cvZero(anaglyph);

    //junctions images
    cvAdd(frameR, frameL, anaglyph);

    //save junctioned image
    cvSaveImage("anaglyph.bmp", anaglyph);

    // BUG FIX: the anaglyph image was previously leaked after saving.
    cvReleaseImage(&anaglyph);
}
// Processes one pipeline step: accumulates the current color input image
// into mFinalImage according to the configured mode (addition/subtraction),
// then publishes the result to the display editor.
void THISCLASS::OnStep() {
	// Get and check input image
	IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 3) {
		AddError(wxT("The input image is not a color image."));
		return;
	}

	// Check and update the background
	if (! mFinalImage) {
		// First frame: start the accumulator as a copy of the input.
		mFinalImage = cvCloneImage(inputimage);
	} else if (mMode == sMode_Addition) {
		cvAdd(mFinalImage, inputimage, mFinalImage);
	} else if (mMode == sMode_Subtraction) {
		cvSub(mFinalImage, inputimage, mFinalImage);
	} else if (mMode == sMode_Multiplication) {
		// NOTE(review): multiplication mode does nothing — mFinalImage is
		// left unchanged. Confirm whether this is intentional.
	}

	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(mFinalImage);
	}
}
예제 #16
0
//--------------------------------------------------------------------------------
// Adds mom's pixels to this image in place. Allocates this image to mom's
// size if needed; logs and does nothing on format or ROI mismatch.
void ofxCvImage::operator += ( ofxCvImage& mom ) {
	if( !mom.bAllocated ){
		ofLogError("ofxCvImage") << "operator+=: source image not allocated";	
		return;	
	}
	if( !bAllocated ){
		ofLogNotice("ofxCvImage") << "operator+=: allocating to match dimensions: "
			<< mom.getWidth() << " " << mom.getHeight();
		allocate(mom.getWidth(), mom.getHeight());
	}

	// Refuse to add images whose pixel formats differ.
	bool sameFormat = mom.getCvImage()->nChannels == cvImage->nChannels
	               && mom.getCvImage()->depth == cvImage->depth;
	if( !sameFormat ) {
		ofLogError("ofxCvImage") << "operator+=: image type mismatch";
		return;
	}
	if( !matchingROI(getROI(), mom.getROI()) ) {
		ofLogError("ofxCvImage") << "operator+=: region of interest mismatch";
		return;
	}

	// Sum into the scratch image, then swap it in as the current image.
	cvAdd( cvImage, mom.getCvImage(), cvImageTemp );
	swapTemp();
	flagImageChanged();
}
예제 #17
0
// Captures video from the default camera, thresholds each frame for the
// tracked color (GetThresholdedImage), updates the trail overlay via
// trackObject() into the global imgTracking, and displays both the binary
// image and the annotated video until ESC is pressed or the stream ends.
int main() {
    CvCapture* capture =0;
    capture = cvCaptureFromCAM(0);
    if(!capture) {
       printf("Capture failure\n");
       return -1;
    }
    
    // Grab one frame up front to learn the frame size.
    IplImage* frame=0;
    frame = cvQueryFrame(capture);
    if(!frame) return -1;
   
   //create a blank image and assigned to 'imgTracking' which has the same size of original video
   imgTracking=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U, 3);
   cvZero(imgTracking); //covert the image, 'imgTracking' to black

   cvNamedWindow("Video");
   cvNamedWindow("Ball");

      //iterate through each frames of the video
   while(true) {
      frame = cvQueryFrame(capture);
      if(!frame) break;
      // Work on a private copy: the buffer returned by cvQueryFrame is
      // owned by the capture and must not be modified or released.
      frame=cvCloneImage(frame);

      cvSmooth(frame, frame, CV_GAUSSIAN,3,3); //smooth the original image using Gaussian kernel

      IplImage* imgHSV = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
      cvCvtColor(frame, imgHSV, CV_BGR2HSV); //Change the color format from BGR to HSV
      IplImage* imgThresh = GetThresholdedImage(imgHSV);
        
      cvSmooth(imgThresh, imgThresh, CV_GAUSSIAN,3,3); //smooth the binary image using Gaussian kernel
          
      //track the possition of the ball
      trackObject(imgThresh);

      // Add the tracking image and the frame
      cvAdd(frame, imgTracking, frame);

      cvShowImage("Ball", imgThresh);
      cvShowImage("Video", frame);
         
         //Clean up used images
      cvReleaseImage(&imgHSV);
      cvReleaseImage(&imgThresh);
      cvReleaseImage(&frame); // releases the clone, not the capture buffer

      //Wait 10mS
      int c = cvWaitKey(10);
      //If 'ESC' is pressed, break the loop
      if((char)c==27 ) break;
    }

    cvDestroyAllWindows();
    cvReleaseImage(&imgTracking);
    cvReleaseCapture(&capture);

    return 0;
}
예제 #18
0
//--------------------------------------------------------------------------------
// Adds mom's pixels to this image in place; prints an error and does
// nothing if the two images have different dimensions.
void ofxCvFloatImage::operator += ( ofxCvFloatImage& mom ) {
	if( mom.width != width || mom.height != height ) {
        cout << "error in +=, images are different sizes" << endl;
        return;
	}
	// Sum into the scratch buffer, then promote it to the live image.
	cvAdd( cvImage, mom.getCvImage(), cvImageTemp );
	swapTemp();
}
예제 #19
0
// Morphological thickening-miss operation:
// dst = src + open-hit-or-miss-transform(src).
void lhMorpThickMiss(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL, int type=LH_MORP_TYPE_BINARY)
{

	// Preconditions: distinct non-null src/dst, non-null foreground SE that
	// differs from the background SE.
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	// dst <= open hit-or-miss transform of src (background SE defaulted to NULL).
	lhMorpHMTOpen(src, dst, sefg, NULL, type);
	// Thickening: add the transform back onto the source.
	cvAdd(src, dst, dst);
}
예제 #20
0
// Reconstructs the shape described by the given parameter vector as a
// numberofpoints x 2 matrix of (x, y) coordinates:
// shape = meanShape + sum_m parameters[m] * combineshapevectors[m].
// NOTE: the returned matrix is a function-local static, overwritten on
// every call — callers must not free it or hold it across calls.
CvMat * LKInverseComp::computeShape(float *parameters)
{
    // Accumulator for the weighted sum of shape basis vectors.
    IplImage * currentShape = cvCreateImage(cvSize(totalnumberofpoints,1),IPL_DEPTH_64F,1);
    cvZero(currentShape);

    // Single scratch image reused across iterations.
    // BUG FIX: the original cloned a fresh image on every loop iteration
    // and never released any of them, leaking (nS+4) images per call.
    IplImage * temp = NULL;

    for (int m=0;m<(nS+4);m++)
    {
        if (temp == NULL)
            temp = cvCloneImage(combineshapevectors[m]);
        // temp <= parameters[m] * combineshapevectors[m]
        cvConvertScale(combineshapevectors[m],temp,parameters[m]);
        cvAdd(temp,currentShape,currentShape,0);
    }
    if (temp != NULL)
        cvReleaseImage(&temp);

    // Add the mean shape to the parameterized deformation.
    cvAdd(meanShape,currentShape,currentShape,0);

    static CvMat * mat = cvCreateMat(numberofpoints,2,CV_64FC1);
    for (int m=0;m<numberofpoints;m++)
    {
        // currentShape stores interleaved (x, y) pairs.
        CvScalar s1 = cvGet2D(currentShape,0,2*m);
        CvScalar s2 = cvGet2D(currentShape,0,2*m +1);

        cvSet2D(mat,m,0,s1);

        cvSet2D(mat,m,1,s2);
    }
    cvReleaseImage(&currentShape);
    return mat;
}
예제 #21
0
// Evaluates the space-time (3D) Harris cornerness on the precomputed
// second-moment channels cxx..ctt and writes H = det(C) - k*trace(C)^3
// into dst. tmp1/tmp2 are member scratch images of matching size.
void HarrisBuffer::HarrisFunction(double k, IplImage* dst)
{
  // Harris function in 3D
  // original space-time Harris
  /*detC=  
  cxx.*cyy.*ctt +		xx yy tt
  cxy.*cyt.*cxt +		2 * xy yt xt
  cxt.*cxy.*cyt -		.
  cxx.*cyt.*cyt -		xx yt^2
  cxy.*cxy.*ctt -		tt xy^2	
  cxt.*cyy.*cxt ;		yy xt^2
  */
  // tmp1 = cxx * cyy * ctt
  cvMul(cxx, cyy, tmp1);
  cvMul(ctt, tmp1, tmp1);

  // tmp2 = 2 * cxy * cxt * cyt (scale 2 covers both symmetric terms)
  cvMul(cxy, cxt, tmp2);
  cvMul(cyt, tmp2, tmp2,2);

  cvAdd(tmp1,tmp2,tmp1);

  // subtract cxx * cyt^2
  cvMul(cyt,cyt,tmp2);
  cvMul(cxx,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  // subtract ctt * cxy^2
  cvMul(cxy,cxy,tmp2);
  cvMul(ctt,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  // subtract cyy * cxt^2 — tmp1 now holds det(C)
  cvMul(cxt,cxt,tmp2);
  cvMul(cyy,tmp2,tmp2);

  cvSub(tmp1,tmp2,tmp1);

  //trace3C=(cxx+cyy+ctt).^3;
  cvAdd(cxx,cyy,tmp2);
  cvAdd(ctt,tmp2,tmp2);
  cvPow(tmp2,tmp2,3);

  //H=detC-stharrisbuffer.kparam*trace3C;
  cvScale(tmp2,tmp2,k,0);
  cvSub(tmp1,tmp2,dst);
}
예제 #22
0
// Morphological contrast enhancement:
// dst = src + white-top-hat(src) - black-top-hat(src).
void lhMorpEnhance(const IplImage* src, IplImage* dst, IplConvKernel* se=NULL, int iterations=1)
{
	assert(src != NULL && dst != NULL && src != dst);
	IplImage*  temp = cvCloneImage(src);
	// temp <= white top-hat (bright detail), dst <= black top-hat (dark detail)
    lhMorpWhiteTopHat( src, temp, se, iterations );
    lhMorpBlackTopHat( src, dst, se, iterations );
	// Boost bright details onto the source, then subtract the dark details.
	cvAdd(src, temp, temp);
    cvSub(temp, dst, dst );
	cvReleaseImage(&temp);
}
예제 #23
0
파일: cvl1qc.cpp 프로젝트: caomw/l1cs
// Matrix-free application of the H11 operator used by the solver's inner
// Newton/CG step: Y = sigx .* X + (-fe_inv * A'A X + fe_inv_2 * <atr, X> * atr),
// with A and A' applied through the callbacks stored in the userdata struct.
static void icvH11Ops( CvMat* X, CvMat* Y, void* userdata )
{
	CvH11OpsData* h11 = (CvH11OpsData*)userdata;
	// AR = A*X, then AtR = A'*(A*X).
	h11->AOps( X, h11->AR, h11->userdata );
	h11->AtOps( h11->AR, h11->AtR, h11->userdata );
	// Rank-one correction coefficient along atr.
	double rc = h11->fe_inv_2 * cvDotProduct( h11->atr, X );
	// AtR <= -fe_inv * AtR + rc * atr.
	cvAddWeighted( h11->AtR, -h11->fe_inv, h11->atr, rc, 0, h11->AtR );
	// Y = sigx .* X + AtR.
	cvMul( h11->sigx, X, h11->tX );
	cvAdd( h11->tX, h11->AtR, Y );
}
예제 #24
0
파일: Kalman.cpp 프로젝트: tzwenn/alvar
// Numerically approximates the measurement Jacobian H around x_pred using
// central differences: column i = (h(x+step*e_i) - h(x-step*e_i)) / (2*step).
void KalmanSensorEkf::update_H(CvMat *x_pred) {
	// By default we update the H by calculating Jacobian numerically
	const double step = 0.000001;
	cvZero(H);
	for (int i=0; i<n; i++) {
		// Header view onto column i of H (no data copy).
		CvMat H_column;
		cvGetCol(H, &H_column, i);

		// Perturb state component i by +/- step.
		cvZero(delta); 
		cvmSet(delta, i, 0, step);
		cvAdd(x_pred, delta, x_plus);
		cvmSet(delta, i, 0, -step);
		cvAdd(x_pred, delta, x_minus);

		// Evaluate the measurement model at both perturbed states.
		h(x_plus, z_tmp1);  
		h(x_minus, z_tmp2);	
		// Central difference written directly into H's column.
		cvSub(z_tmp1, z_tmp2, &H_column);
		cvScale(&H_column, &H_column, 1.0/(2*step));
	}
}
예제 #25
0
// Isolates strongly red objects in a BGR image: returns a newly allocated
// single-channel binary mask (255 where the pixel is dominantly red).
// The caller owns the returned image.
IplImage* GetThresholdedImage(IplImage* img)
{
	IplImage* channelRed = cvCreateImage(cvGetSize(img),8,1);
	IplImage* channelGreen = cvCreateImage(cvGetSize(img),8,1);
	IplImage* channelBlue = cvCreateImage(cvGetSize(img),8,1);

	cvSplit(img, channelBlue, channelGreen, channelRed, NULL);

	// Suppress white/neutral areas: red minus (green + blue).
	cvAdd(channelBlue, channelGreen, channelGreen);
	cvSub(channelRed, channelGreen, channelRed);

	// Keep only pixels whose red dominance exceeds the threshold.
	cvThreshold(channelRed, channelRed, 100, 255, CV_THRESH_BINARY);

	cvReleaseImage(&channelGreen);
	cvReleaseImage(&channelBlue);
	return channelRed;
}
예제 #26
0
static void dance_measurement(const CvMat* x_k,
                              const CvMat* n_k,
                              CvMat* z_k)
{
    cvSetReal2D(z_k, 0, 0, cvGetReal2D(x_k, 0, 0));
    cvSetReal2D(z_k, 1, 0, cvGetReal2D(x_k, 1, 0));

    /* as above, skip this step when n_k is null */
    if(n_k)
    {
        cvAdd(z_k, n_k, z_k);
    }
}
예제 #27
0
파일: Kalman.cpp 프로젝트: tzwenn/alvar
// Numerically approximates the state-transition Jacobian F around the
// current state x via central differences with time step dt:
// column i = (f(x+step*e_i, dt) - f(x-step*e_i, dt)) / (2*step).
void KalmanEkf::update_F(unsigned long tick) {
	// By default we update the F by calculating Jacobian numerically
	// TODO
	// tick presumably counts milliseconds — /1000 yields seconds; confirm.
	double dt = (tick-prev_tick)/1000.0;
	const double step = 0.000001;
	cvZero(F);
	for (int i=0; i<n; i++) {
		// Header view onto column i of F (no data copy).
		CvMat F_column;
		cvGetCol(F, &F_column, i);

		// Perturb state component i by +/- step.
		cvZero(delta); 
		cvmSet(delta, i, 0, step);
		cvAdd(x, delta, x_plus);
		cvmSet(delta, i, 0, -step);
		cvAdd(x, delta, x_minus);

		// Propagate both perturbed states through the process model f.
		f(x_plus, x_tmp1, dt);  
		f(x_minus, x_tmp2, dt);	
		// Central difference written directly into F's column.
		cvSub(x_tmp1, x_tmp2, &F_column);
		cvScale(&F_column, &F_column, 1.0/(2*step));
	}
}
예제 #28
0
파일: cvgabor.cpp 프로젝트: ToMadoRe/v4r
/**
 * @brief CvGabor::conv_img(IplImage *src, IplImage *dst, int Type)
 * Convolves src with the precomputed Gabor kernels and writes the selected
 * response (real part, imaginary part, or magnitude) into dst, normalized
 * to [0,255] when dst is 8-bit.
 * @param src 8-bit single-channel input image
 * @param dst output image
 * @param Type CV_GABOR_REAL / CV_GABOR_IMAG / CV_GABOR_MAG / CV_GABOR_PHASE
 */
void CvGabor::conv_img(IplImage *src, IplImage *dst, int Type)   // function: conv_img
{
// printf("CvGabor::conv_img 1\n");
  double ve; //, re,im;
  
  // NOTE: the matrix is created transposed (width x height) and indexed as
  // (i, j) = (column, row) throughout this function.
  CvMat *mat = cvCreateMat(src->width, src->height, CV_32FC1);
  for (int i = 0; i < src->width; i++) {
    for (int j = 0; j < src->height; j++) {
      ve = CV_IMAGE_ELEM(src, uchar, j, i);   // read the pixel at (row j, column i)
      CV_MAT_ELEM(*mat, float, i, j) = (float)ve;  // widen to float
    }
  }
  
// printf("CvGabor::conv_img 2\n");
  // Scratch matrices for the real and imaginary filter responses.
  // NOTE(review): rmat/imat/mat are not released within the visible portion
  // of this function — verify cleanup in the remainder of its body.
  CvMat *rmat = cvCreateMat(src->width, src->height, CV_32FC1);
  CvMat *imat = cvCreateMat(src->width, src->height, CV_32FC1);
  
  switch (Type)
  {
    case CV_GABOR_REAL:
      cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)Real, cvPoint( (Width-1)/2, (Width-1)/2));
      break;
    case CV_GABOR_IMAG:
      cvFilter2D( (CvMat*)mat, (CvMat*)mat, (CvMat*)Imag, cvPoint( (Width-1)/2, (Width-1)/2));
      break;
    case CV_GABOR_MAG:
      // magnitude = sqrt(real^2 + imag^2)
      cvFilter2D( (CvMat*)mat, (CvMat*)rmat, (CvMat*)Real, cvPoint( (Width-1)/2, (Width-1)/2));
      cvFilter2D( (CvMat*)mat, (CvMat*)imat, (CvMat*)Imag, cvPoint( (Width-1)/2, (Width-1)/2));
      
      cvPow(rmat,rmat,2); 
      cvPow(imat,imat,2);
      cvAdd(imat,rmat,mat); 
      cvPow(mat,mat,0.5); 
      break;
    case CV_GABOR_PHASE:
      // NOTE(review): phase is not computed — mat still holds the raw input
      // copy here, so dst receives the normalized input. Confirm intent.
      break;
  }
  
// printf("CvGabor::conv_img 3\n");
  if (dst->depth == IPL_DEPTH_8U)
  {
    // Stretch the response to the full 8-bit range, transposing back.
    cvNormalize((CvMat*)mat, (CvMat*)mat, 0, 255, CV_MINMAX);
    for (int i = 0; i < mat->rows; i++)
    {
      for (int j = 0; j < mat->cols; j++)
      {
        ve = CV_MAT_ELEM(*mat, float, i, j);
        CV_IMAGE_ELEM(dst, uchar, j, i) = (uchar)cvRound(ve);
      }
    }
  }
예제 #29
0
// Computes dst = sqrt((dx/4)^2 + (dy/4)^2) + SMALLNUM for a 32-bit float,
// single-channel src: the rescaled gradient magnitude plus a small epsilon
// that keeps every output strictly positive. src and dst must be CV_32FC1
// and the same size; violations raise a CV error.
CV_IMPL void cvCalS(const CvArr* srcarr,
                    CvArr* dstarr)
{
    CV_FUNCNAME("cvCalS");
    
    __BEGIN__;
    CvMat sstub, *src;
    CvMat dstub, *dst;
    CvMat* src_dx=0, *src_dy=0;
    CvSize size;
    int i, j;
    int iStep;
    float* fPtr;
    
    // Normalize the input/output arrays into CvMat headers.
    CV_CALL( src = cvGetMat(srcarr, &sstub ));
    CV_CALL( dst = cvGetMat(dstarr, &dstub ));
    
    if( CV_MAT_TYPE(src->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );
    
    if( CV_MAT_TYPE(dst->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );
    
    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input images must have the same size" );
    
    size = cvGetMatSize( src );
    
    src_dx  = cvCreateMat(size.height, size.width, CV_32FC1 );
    src_dy  = cvCreateMat(size.height, size.width, CV_32FC1 );
    cvSetZero(src_dx);
    cvSetZero(src_dy);
    
    // Element stride of dst in floats, for the direct pixel loop below.
    iStep = dst->step / sizeof(fPtr[0]);
    fPtr = dst->data.fl;
    
    // First-order Sobel with aperture 1 (plain central difference).
    cvSobel(src, src_dx, 1, 0, 1);
    cvSobel(src, src_dy, 0, 1, 1);
    cvMul(src_dx, src_dx, src_dx, 0.25f*0.25f); //rescale gradient
    cvMul(src_dy, src_dy, src_dy, 0.25f*0.25f); //rescale gradient
    // dst <= dx^2 + dy^2 (squared magnitude; sqrt applied below)
    cvAdd(src_dx, src_dy, dst);
    
    // Element-wise sqrt; SMALLNUM keeps values strictly positive.
    for(j=0; j<size.height; j++){
        for (i=0; i<size.width; i++)
            fPtr[i+iStep*j] = sqrt(fPtr[i+iStep*j])+SMALLNUM;
    }
    cvReleaseMat(&src_dx);
    cvReleaseMat(&src_dy);
    
    __END__;
}
예제 #30
0
// Shifts a texture parameter vector from one age group to another:
// newParam = curParam + (group prototype of newAgeG - prototype of curAgeG),
// where the prototypes are rows of __TextureParamGroups.
void FacePredict::CalcNewTextureParams(CvMat* curParam, CvMat* newParam, int curAgeG, int newAgeG)
{
	CvMat* diff = cvCreateMat(1, __nTextureModes, CV_64FC1);
	// BUG FIX: the original allocated full matrices with cvCreateMat and
	// then overwrote them via cvGetRow, which only fills a submatrix
	// header — the allocated data buffers were leaked on every call.
	// Row views need nothing more than stack-allocated headers.
	CvMat curClassicP, newClassicP;
	cvGetRow(__TextureParamGroups, &curClassicP, curAgeG);
	cvGetRow(__TextureParamGroups, &newClassicP, newAgeG);

	cvSub(&newClassicP, &curClassicP, diff);
	cvAdd(curParam, diff, newParam);

	cvReleaseMat(&diff);
}