IplImage* StepPreprAdaptBin::DoPrepr(IplImage *src1, IplImage *src2)
{
	if (m_sizeData.SizeChanged(src1))
		PrepareImg(src1);

	int side = m_adaptThreshSide;
	CvRect ROI;
	ROI.width = ROI.height = side;

	double global_mean = cvAvg(src2).val[0];
	double local_mean;
	float threshold;

	for (int x = 0; x < src2->width; x += side) {
		for (int y = 0; y < src2->height; y += side) {
			ROI.x = x;
			ROI.y = y;
			// Clamp the tile so the ROI never extends past the image border.
			ROI.width = (x + side <= src2->width) ? side : src2->width - x;
			ROI.height = (y + side <= src2->height) ? side : src2->height - y;
			cvSetImageROI(src1, ROI);
			cvSetImageROI(src2, ROI);
			cvSetImageROI(m_resultImg, ROI);
			local_mean = cvAvg(src2).val[0];
			threshold = (local_mean / global_mean) * (float) m_binThresh;
			cvThreshold(src1, m_resultImg, threshold, 255.0, CV_THRESH_BINARY);
		}
	}
	
	cvResetImageROI(src1);
	cvResetImageROI(src2);
	cvResetImageROI(m_resultImg);
	return m_resultImg;
}
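The local-to-global mean ratio is what makes the block above adaptive: tiles brighter than the image as a whole get a proportionally higher threshold. Below is a standalone sketch of the same technique, assuming an 8-bit single-channel input; the function name and the base-threshold parameter are illustrative, not part of the class above.

IplImage* AdaptiveTileThreshold(IplImage* src, int side, double baseThresh)
{
	IplImage* dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	double globalMean = cvAvg(src).val[0];
	for (int x = 0; x < src->width; x += side) {
		for (int y = 0; y < src->height; y += side) {
			// clamp the tile so the ROI never leaves the image
			CvRect roi = cvRect(x, y,
				(x + side <= src->width) ? side : src->width - x,
				(y + side <= src->height) ? side : src->height - y);
			cvSetImageROI(src, roi);
			cvSetImageROI(dst, roi);
			double localMean = cvAvg(src).val[0];
			// scale the base threshold by the tile's relative brightness
			cvThreshold(src, dst, (localMean / globalMean) * baseThresh, 255.0, CV_THRESH_BINARY);
		}
	}
	cvResetImageROI(src);
	cvResetImageROI(dst);
	return dst;
}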
void thresholdCalculator::calculateAverages(ofxCvGrayscaleAdvanced & smallCurrentImg, ofxCvGrayscaleAdvanced & maskImg, ofRectangle & targetRect) {
	
	roi.x = targetRect.x / divisor;
	roi.y = targetRect.y / divisor;
	
	maskImg.setROI(roi);
	smallCurrentImg.setROI(roi);
	
	CvScalar tempPupilAvg = cvAvg(smallCurrentImg.getCvImage(), maskImg.getCvImage());
	cvNot(maskImg.getCvImage(), notDiffImg.getCvImage());
	pupilAvg = tempPupilAvg.val[0];
	
	// Compute the pupil average twice: the second pass excludes pixels far from the first average, to remove the influence of the glint.
	cvThreshold(smallCurrentImg.getCvImage(), farFromAvg, pupilAvg + 30, 255, CV_THRESH_BINARY);		// 30 is the distance from average.
	cvSub(maskImg.getCvImage(), farFromAvg, newMask);								// make a mask to get rid of those far points.
	CvScalar newPupilAvg = cvAvg(smallCurrentImg.getCvImage(), newMask);			// get new average value.
	
	// get average, min and max value of white area of an eye.
	CvScalar tempWhiteAvg = cvAvg(smallCurrentImg.getCvImage(), notDiffImg.getCvImage());
	for (int i = 0; i < 6; i++) notDiffImg.erode();				// erode to reduce the influence of small noise and the glint
	cvMinMaxLoc(smallCurrentImg.getCvImage(), &whiteMin, &whiteMax, &whiteLocMin, &whiteLocMax, notDiffImg.getCvImage());

	maskImg.resetROI();
	smallCurrentImg.resetROI();
	
	pupilAvg = newPupilAvg.val[0];					// value is in the first element of CvScalar
	whiteAvg = tempWhiteAvg.val[0];
	
}
void THISCLASS::OnStep() {
	// Get and check input image
	IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 3) {
		AddError(wxT("The input image is not a color image."));
		return;
	}

	// Check and update the background
	if (! mOutputImage) {
	  mOutputImage = cvCloneImage(inputimage);
	} else {
	  cvCopyImage(inputimage, mOutputImage);
	}
	if (! mBackgroundImage) {
		mBackgroundImage = cvCloneImage(mOutputImage);
	} else if (mUpdateProportion > 0) {
		if ((cvGetSize(mOutputImage).height != cvGetSize(mBackgroundImage).height) || (cvGetSize(mOutputImage).width != cvGetSize(mBackgroundImage).width)) {
			AddError(wxT("Input and background images do not have the same size."));
			return;
		}

		cvAddWeighted(mOutputImage, mUpdateProportion, mBackgroundImage, 1.0 - mUpdateProportion, 0, mBackgroundImage);
	}

	try {
		// Correct the output image with the difference in image mean
		if (mCorrectMean) {
			mBackgroundImageMean = cvAvg(mBackgroundImage);
			CvScalar tmpScalar = cvAvg(mOutputImage);
			cvAddS(mOutputImage, cvScalar(mBackgroundImageMean.val[0] - tmpScalar.val[0], mBackgroundImageMean.val[1] - tmpScalar.val[1], mBackgroundImageMean.val[2] - tmpScalar.val[2]), mOutputImage);
		}

		// Background subtraction
		if (mMode == sMode_SubImageBackground) {
			cvSub(mOutputImage, mBackgroundImage, mOutputImage);
		} else if (mMode == sMode_SubBackgroundImage) {
			cvSub(mBackgroundImage, mOutputImage, mOutputImage);
		} else {
			cvAbsDiff(mOutputImage, mBackgroundImage, mOutputImage);
		}
	} catch (...) {
		AddError(wxT("Background subtraction failed."));
	}
	mCore->mDataStructureImageColor.mImage = mOutputImage;
	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(mOutputImage);
	}
}
Example #4
/*************************************************************************
* @Function:
*	calLuminanceSim()
* @Inputs:
*   IplImage* input1            - first input image
*   IplImage* input2            - second input image
* @Returns:
*   double                      - the luminance similarity of the two images
* @Notes:
*   Computes the luminance similarity of the images, following the
*   luminance masking model. The formula is:
*	l(x,y) = (2*u_x*u_y + c1) / (u_x*u_x + u_y*u_y + c1)
*************************************************************************/
double calLuminanceSim(const IplImage* input1, const IplImage* input2)
{
	double lum=0,c1=0;
	double k1=0.01;
	CvScalar mean1,mean2;

	mean1=cvAvg(input1);
	mean2=cvAvg(input2);
	c1=(k1*255)*(k1*255);
	lum=(2*mean1.val[0]*mean2.val[0]+c1)/(mean1.val[0]*mean1.val[0]+mean2.val[0]*mean2.val[0]+c1);
	return lum;
}
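A quick sanity check for calLuminanceSim (the file name is hypothetical): comparing an image against itself makes the two means identical, so the luminance term reduces to (2*u*u+c1)/(2*u*u+c1) = 1.0.

IplImage* a = cvLoadImage("a.png", CV_LOAD_IMAGE_GRAYSCALE);
double selfSim = calLuminanceSim(a, a);		// exactly 1.0
printf("luminance similarity: %f\n", selfSim);
cvReleaseImage(&a);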
Example #5
/// <summary>
/// Illumination normalization:
/// 1. Convert the color space to HSV;
/// 2. Shift the V channel so its mean equals the fixed value IlluminationThreshold;
/// 3. Convert back from HSV to RGB.
/// </summary>
void CLightSet::LightNormalization(IplImage* src, IplImage* dst, int threshold){
	ASSERT(src->nChannels==3);
	// convert the color space
	cvCvtColor(src,dst,CV_RGB2HSV);
	// split the channels
	IplImage* imgChannel[3] = { 0, 0, 0 };  

	for (int i=0;i<dst->nChannels;i++)
	{
		imgChannel[i] = cvCreateImage(cvGetSize( dst ), IPL_DEPTH_8U, 1);  // single-channel images are required (e.g. for histogram equalization)
	}

	cvSplit(dst, imgChannel[0], imgChannel[1], imgChannel[2], 0);	// HSVA

	CvScalar avg = cvAvg(imgChannel[2]);
	// shift the V channel so its mean becomes 'threshold'
	cvCvtScale(imgChannel[2], imgChannel[2], 1.0, threshold - avg.val[0]);
	// merge back into dst (not src), so the HSV->RGB conversion below sees the shifted V channel
	cvMerge(imgChannel[0], imgChannel[1], imgChannel[2], 0, dst);

	cvCvtColor(dst, dst, CV_HSV2RGB);

	for (int i=0;i<dst->nChannels;i++)
	{
		cvReleaseImage(&imgChannel[i] ); 
	}
}
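A hedged usage sketch: dst must be a pre-allocated 3-channel image of the same size, because the function converts into it in place. The instance construction and the target mean of 180 are assumptions, not shown in the class above.

IplImage* src = cvLoadImage("input.jpg", CV_LOAD_IMAGE_COLOR);
IplImage* dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);
CLightSet lightSet;				// assumes a default constructor
lightSet.LightNormalization(src, dst, 180);	// V-channel mean shifted to 180
cvReleaseImage(&src);
cvReleaseImage(&dst);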
Example #6
//--------------------------------------------------------------
void testApp::update()
{
	while(!drawThis.empty())
	{
		quadtree *item;
		item=drawThis.front();
		drawThis.pop();
		CvScalar col[4];

		item->part();
		for(int i=0;i<4;i++)
		{
			cvZero(temp);
			cvRectangle(temp, cvPoint(item->child[i]->x1,item->child[i]->y1),
				cvPoint(item->child[i]->x2,item->child[i]->y2), cvScalar(255,255,255,255),
				CV_FILLED, 8, 0);

			col[i] = cvAvg(img, temp);

			cvRectangle(disp,cvPoint(item->child[i]->x1,item->child[i]->y1),
				cvPoint(item->child[i]->x2,item->child[i]->y2), col[i],
				CV_FILLED, 8, 0 );
		}
		image=disp;
	}
}
Example #7
//============================================================================
void AAM_TDM::ZeroMeanUnitLength(CvMat* Texture)
{
	CvScalar mean =  cvAvg(Texture);
	cvSubS(Texture, mean, Texture);
	double norm = cvNorm(Texture);
	cvConvertScale(Texture, Texture, 1.0/norm);
}
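A small check of the invariant, assuming AAM_TDM is default-constructible: after ZeroMeanUnitLength the matrix has mean ~0 and unit L2 norm.

CvMat* tex = cvCreateMat(1, 4, CV_32FC1);
cvmSet(tex, 0, 0, 1.0); cvmSet(tex, 0, 1, 2.0);
cvmSet(tex, 0, 2, 3.0); cvmSet(tex, 0, 3, 4.0);
AAM_TDM tdm;					// assumed default constructor
tdm.ZeroMeanUnitLength(tex);
printf("mean=%f norm=%f\n", cvAvg(tex).val[0], cvNorm(tex));	// ~0, ~1
cvReleaseMat(&tex);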
Example #8
void avgCal( int argc, char** argv )
{
	char *imagePath = argv[1];
	char *savePath = argv[2];

	//load image
	IplImage* bgr = NULL;
	bgr = cvLoadImage( imagePath );

	//convert rgb2hsv
	IplImage* bgr32f, * hsv;
	bgr32f = cvCreateImage( cvGetSize(bgr), IPL_DEPTH_32F, 3 );
	hsv = cvCreateImage( cvGetSize(bgr), IPL_DEPTH_32F, 3 );
	cvConvertScale( bgr, bgr32f, 1.0 / 255.0, 0 );	// scale to the 0..1 range before converting
	cvCvtColor( bgr32f, hsv, CV_BGR2HSV );

	cvNamedWindow("canny",1);
	//cvShowImage( "canny", hsv );
	//cvWaitKey(0);
	cvReleaseImage( &bgr32f );
	//cal the average value of every channel
	CvScalar val = cvAvg(hsv);
	//	val[0]	double	83.488760100210129
	//	val[1]	double	0.53591686981233155
	//	val[2]	double	0.53942276813455614
	// For a discussion of the HSV value ranges in OpenCV, see http://blog.csdn.net/dark_blue_sea/article/details/5251763

	//release
	cvReleaseImage(&hsv);
	cvReleaseImage( &bgr );
}
Example #9
float dmz_brightness_score_for_image(IplImage *image) {
  assert(image->nChannels == 1);
  assert(image->depth == IPL_DEPTH_8U);
  
  // could Neon and/or GPU this; however, this call to cvAvg apparently has NO effect on FPS (iPhone 4S)
  CvScalar mean = cvAvg(image, NULL);
  return (float)mean.val[0];
}
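A hypothetical caller, under the same 8-bit grayscale precondition; the cutoff of 60 is an illustrative value, not one taken from the dmz library.

static int frame_is_bright_enough(IplImage *grayFrame) {
	// skip frames too dark to be worth scanning (the threshold is an assumption)
	return dmz_brightness_score_for_image(grayFrame) >= 60.0f;
}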
Example #10
void Classifier::optical_flow(const IplImage *frame, double *xD, double *yD) {
    double xDiff = 0;
    double yDiff = 0;
    if (prevFrame) {
        /* Optical flow for the entire image */
        CvSize img_sz = cvGetSize(frame);

        IplImage *imgA = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);
        IplImage *imgB = cvCreateImage(img_sz, IPL_DEPTH_8U, 1);

        cvCvtColor(frame, imgA, CV_BGR2GRAY);
        cvCvtColor(prevFrame, imgB, CV_BGR2GRAY);

        CvMat* velx = cvCreateMat(img_sz.height, img_sz.width, CV_32FC1);
        CvMat* vely = cvCreateMat(img_sz.height, img_sz.width, CV_32FC1);

        cvCalcOpticalFlowLK(imgA, imgB, cvSize(15, 15), velx, vely);

        // the mean flow over the whole image approximates the global motion
        xDiff = cvAvg(velx).val[0];
        yDiff = cvAvg(vely).val[0];

        // release the temporaries to avoid leaking one image pair per frame
        cvReleaseImage(&imgA);
        cvReleaseImage(&imgB);
        cvReleaseMat(&velx);
        cvReleaseMat(&vely);
    } else {
        prevFrame = cvCreateImage(cvGetSize(frame), frame->depth, frame->nChannels);
    }

    // always produce a defined result (zero motion on the first frame)
    *xD = xDiff;
    *yD = yDiff;
    cvCopy(frame, prevFrame);
}
Example #11
//============================================================================
void AAM_TDM::CalcMeanTexture(const CvMat* AllTextures, CvMat* meanTexture)
{
	CvMat submat;
	for(int i = 0; i < meanTexture->cols; i++)
	{
		cvGetCol(AllTextures, &submat, i);
		cvmSet(meanTexture, 0, i, cvAvg(&submat).val[0]);
	}
}
Example #12
void myCvPixelAvgColorDescriptor(IplImage* img, MyCvDescriptor* _descriptor, CvPoint _pixel, int _neighbor_size) {
    assert((_descriptor->type == MY_DESCRIPTOR_TYPE_COLOR) && (_neighbor_size >= 0));

    IplImage *subimg = sub_square(img, _pixel, _neighbor_size);
    CvScalar *color = malloc(sizeof(CvScalar));
    *color = cvAvg(subimg, NULL);
    _descriptor->data = color;
    cvReleaseImage(&subimg);
}
Example #13
CvScalar th_brg2hsv(CvScalar bgr) {
	if (pxHSV == 0x0) {
		pxHSV = cvCreateImage(cvSize(1, 1), IPL_DEPTH_8U, 3);
		pxBGR = cvCloneImage(pxHSV);
	}
	cvSet(pxBGR, bgr, 0x0);
	cvCvtColor(pxBGR, pxHSV, CV_BGR2HSV);
	return cvAvg(pxHSV, 0x0);
}
Example #14
float brightness(IplImage* image)
{
	IplImage* temp;
    temp = cvCreateImage(cvSize(image->width,image->height),IPL_DEPTH_8U,3);
    cvCvtColor( image,temp,CV_RGB2YCrCb);
    cvSetImageCOI(temp,1);			// channel 1 = luma (Y)
    CvScalar scal = cvAvg( temp );
    float metric = (float)scal.val[0]/255;
    cvReleaseImage(&temp);			// avoid leaking the temporary
    return(metric);
}
Example #15
CvScalar th_hsv2bgr(CvScalar hsv) {
	if (pxHSV == 0x0) {
		pxHSV = cvCreateImage(cvSize(1, 1), IPL_DEPTH_8U, 3);
		pxBGR = cvCloneImage(pxHSV);
	}

	cvSet(pxHSV, hsv, 0x0);
	cvCvtColor(pxHSV, pxBGR, CV_HSV2BGR);
	return cvAvg(pxBGR, 0x0);

}
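A round-trip sketch for the two converters above: a BGR color survives BGR -> HSV -> BGR up to 8-bit rounding. The color chosen is arbitrary.

static void demo_color_roundtrip(void) {
	CvScalar bgr  = cvScalar(30, 60, 200, 0);	// arbitrary BGR color
	CvScalar hsv  = th_brg2hsv(bgr);
	CvScalar back = th_hsv2bgr(hsv);		// ~ (30, 60, 200) again
	printf("H=%.0f S=%.0f V=%.0f\n", hsv.val[0], hsv.val[1], hsv.val[2]);
	printf("B=%.0f G=%.0f R=%.0f\n", back.val[0], back.val[1], back.val[2]);
}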
void THISCLASS::OnReloadConfiguration()
{
	// Whether to correct the mean or not
	mCorrectMean = GetConfigurationBool(wxT("CorrectMean"), true);
	// We always calculate the background average, so we can select if we use the moving threshold during the segmentation
	if (mCorrectMean) {
		mBackgroundImageMean = cvAvg(mBackgroundImage);
	} else {
		mBackgroundImageMean = cvScalarAll(0);
	}
}
Example #17
void CameraColorSampler::update() {
    
    ps3eye.update();
	if(ps3eye.isFrameNew()){
		Mat map = toCv(ps3eye);
        IplImage img = map;
        cvSetImageROI(&img, cvRect(getf("roi_x")*640, getf("roi_y")*480, getf("roi_width")*640-1, getf("roi_height")*480-1));
        CvScalar c = cvAvg(&img);
        avg_color[0] = c.val[0] / 255;
        avg_color[1] = c.val[1] / 255;
        avg_color[2] = c.val[2] / 255;
	}
}
Example #18
/**
 * For each image provided, computes the average color vector
 * (represented as a CvScalar object).
 *
 * @param images	The images
 * @param numImages	The length of images
 * @returns 		A numImages-length array of CvScalars where rv[i] is the average color of images[i]
 */
CvScalar* getAvgColors(IplImage** images, int numImages) {
	CvScalar* rv;
	int i;
	// allocate the return vector
	rv = malloc(sizeof(CvScalar)*numImages);
	// compute the average color of each image
	for(i=0;i<numImages;i++){
		rv[i] = cvAvg(images[i],NULL);
	}
	return rv;
}
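A usage sketch with hypothetical file names; the caller owns both the images and the returned array.

int main(void) {
	IplImage* imgs[2];
	imgs[0] = cvLoadImage("a.jpg", CV_LOAD_IMAGE_COLOR);
	imgs[1] = cvLoadImage("b.jpg", CV_LOAD_IMAGE_COLOR);
	CvScalar* avgs = getAvgColors(imgs, 2);
	printf("a.jpg mean BGR: %f %f %f\n", avgs[0].val[0], avgs[0].val[1], avgs[0].val[2]);
	free(avgs);
	cvReleaseImage(&imgs[0]);
	cvReleaseImage(&imgs[1]);
	return 0;
}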
Example #19
void redrawFrame()
{
    // Since QueryFrame increments the frame position, move the frame counter
    // back to the current frame.
    cvSetCaptureProperty( cap, CV_CAP_PROP_POS_FRAMES, currentFrame );
    IplImage* frame = cvQueryFrame( cap );

    gdouble xSpacing = 0.0;
    gdouble ySpacing = 0.0;
    int i = 0;
    CvPoint pt1, pt2;

    int lastChange = 0;

    xSpacing = (rightX - leftX) / NUMBER_OF_KEYS;
    ySpacing = (rightY - leftY) / NUMBER_OF_KEYS;
    for ( i = 0; i < NUMBER_OF_KEYS; i++ )
    {
        // Set pt1 to center of point.
        pt1.x = leftX + ( i * xSpacing );
        pt1.y = leftY + ( i * ySpacing );
        // Adjust for ROI width.
        pt2.x = pt1.x + (ROIsize / 2.0);
        pt2.y = pt1.y + (ROIsize / 2.0);
        pt1.x = pt1.x - (ROIsize / 2.0);
        pt1.y = pt1.y - (ROIsize / 2.0);

        cvSetImageROI( frame, cvRect( pt1.x, pt1.y, ROIsize, ROIsize ) );
        avgs[i] = cvAvg( frame, NULL );

        cvResetImageROI( frame );

        // "I think" I'm looking at the green value for the pixel. Looked "good enough".
        // There's certainly a better way to do this.
        noteOn[i] = (avgs[i].val[1] < thresholdValue);

        if (noteOn[i])
        {
            // Since the note is on, paint a red box around the proper note.
            cvRectangle(frame, pt1, pt2, CV_RGB(255,0,0),1,8,0);
        }
        else
        {
            // The note is off. Paint a green box around the proper note.
            cvRectangle(frame, pt1, pt2, CV_RGB(0,255,0),1,8,0);
        }
    }
    cvShowImage(WINDOW_NAME, frame);
}
int64 ImageAnalysis::calcImageHash(IplImage* src)
{
         if(!src)
         {
                return 0;
         }
            IplImage *res=0, *gray=0, *bin =0;

            res = cvCreateImage( cvSize(8, 8), src->depth, src->nChannels);
            gray = cvCreateImage( cvSize(8, 8), IPL_DEPTH_8U, 1);
            bin = cvCreateImage( cvSize(8, 8), IPL_DEPTH_8U, 1);

            // shrink the picture
            cvResize(src, res);
            // convert to grayscale
            cvCvtColor(res, gray, CV_BGR2GRAY);
            // compute the mean
            CvScalar average = cvAvg(gray);

            // binarize the image relative to the mean
            // using a simple threshold transform
            cvThreshold(gray, bin, average.val[0], 255, CV_THRESH_BINARY);
            // build the hash
            int64 hash = 0;

            int i=0;
            // walk over every pixel of the image
            for( int y=0; y<bin->height; y++ ) {
                    uchar* ptr = (uchar*) (bin->imageData + y * bin->widthStep);
                    for( int x=0; x<bin->width; x++ ) {
                            // single channel
                            if(ptr[x])
                            {
                                    // cast before shifting: a plain 1<<i is a 32-bit
                                    // shift (warning C4334) and overflows once i >= 32
                                    hash |= (int64)1 << i;
                            }
                            i++;
                    }
            }

    // release resources
    cvReleaseImage(&res);
    cvReleaseImage(&gray);
    cvReleaseImage(&bin);
    return hash;
}
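The usual consumer of such an average hash, sketched here as an assumption rather than code from this project: XOR two hashes and count the differing bits. A small Hamming distance is commonly treated as "near-duplicate images".

int hammingDistance(int64 a, int64 b)
{
	int64 x = a ^ b;
	int count = 0;
	while (x) {		// Kernighan's trick: clear the lowest set bit
		x &= x - 1;
		count++;
	}
	return count;		// 0..64; small values mean similar images
}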
void THISCLASS::OnStep() {
	// Get and check input image
	IplImage *inputimage = mCore->mDataStructureImageGray.mImage;
	if (! inputimage) {
		AddError(wxT("No input image."));
		return;
	}
	if (inputimage->nChannels != 1) {
		AddError(wxT("The input image is not a grayscale image."));
		return;
	}

	// Check the background image
	if (! mBackgroundImage) {
		AddError(wxT("No background image loaded."));
		return;
	}
	if ((cvGetSize(inputimage).height != cvGetSize(mBackgroundImage).height) || (cvGetSize(inputimage).width != cvGetSize(mBackgroundImage).width)) {
		AddError(wxT("Input and background images don't have the same size."));
		return;
	}

	try {
		// Correct the inputimage with the difference in image mean
		if (mCorrectMean) {
			cvAddS(inputimage, cvScalar(mBackgroundImageMean.val[0] - cvAvg(inputimage).val[0]), inputimage);
		}

		// Background subtraction
		if (mMode == sMode_SubImageBackground) {
			cvSub(inputimage, mBackgroundImage, inputimage);
		} else if (mMode == sMode_SubBackgroundImage) {
			cvSub(mBackgroundImage, inputimage, inputimage);
		} else {
			cvAbsDiff(inputimage, mBackgroundImage, inputimage);
		}
	} catch (...) {
		AddError(wxT("Background subtraction failed."));
	}

	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(inputimage);
	}
}
Example #22
bool optimizeDepthMap()
{	
	cvErode(uImage,uImage,0,2);		// smooth the user map as well
	cvDilate(uImage,uImage,0,2);
	CvScalar depthMean=cvAvg(dImage,uImage);							// get the average depth value of the user pixels
	cvNot(uImage,uImage);												// invert the user pixels to paint the rest of the image with the average user depth
	//viewImage(dImage);
	cvSet(dImage,depthMean,uImage);										 
	IplImage* tempImage=cvCreateImage(dSize,IPL_DEPTH_8U,1);
	cvConvertScale(dImage,tempImage,1.0/256);
	cvSmooth(tempImage,tempImage,CV_GAUSSIAN,7);//Perform Gaussian Smoothing, depth map is optimized.
	cvConvert(tempImage,dImage);
	cvScale(dImage,dImage,256);
	cvSet(dImage,cvScalar(0),uImage);	
	//viewImage(dImage);
	//cvSmooth(dImage,dImage,CV_GAUSSIAN,gaussian_m,gaussian_n,gaussian_e);//Perform Gaussian Smoothing, depth map is optimized.
	cvNot(uImage,uImage);
	cvReleaseImage(&tempImage);
	return true;
}
void THISCLASS::OnReloadConfiguration() {
	// Whether to correct the mean or not
	mCorrectMean = GetConfigurationBool(wxT("CorrectMean"), true);

	// Mode
	wxString modestr = GetConfigurationString(wxT("Mode"), wxT("AbsDiff"));
	if (modestr == wxT("SubImageBackground")) {
		mMode = sMode_SubImageBackground;
	} else if (modestr == wxT("SubBackgroundImage")) {
		mMode = sMode_SubBackgroundImage;
	} else {
		mMode = sMode_AbsDiff;
	}

	// We always calculate the background average, so we can select if we use the moving threshold during the segmentation
	if (mCorrectMean) {
		mBackgroundImageMean = cvAvg(mBackgroundImage);
	} else {
		mBackgroundImageMean = cvScalar(0);
	}
}
//----------------------------------------------------------------------------------------------
bool brightDarkFinder::getBrightEyeDarkEye(ofxCvGrayscaleAdvanced & img, ofRectangle & roi){
	
	img.setROI(roi);
	float	tempAvg = 	cvAvg(img.getCvImage()).val[0];
	
	averageVec.push_back(tempAvg);
	
	if (averageVec.size() > 320) averageVec.erase(averageVec.begin()); // calculation needs only 10, use 320 just for displaying.
	
	pixelAvginTenframes = 0;
	
	if (averageVec.size() > 10){
		for (int i = 0; i < 10; i++) {
			pixelAvginTenframes += averageVec[averageVec.size()-i-1];
		}
		pixelAvginTenframes /= 10;
	}
	
	img.resetROI();	
	
	return pixelAvginTenframes < tempAvg;
}
Example #25
static void work()
{
	if( W == 0 || H == 0 )
		dst = cvCloneImage(img);
	else
	{
		CvPoint p0 = cvPoint(0, 0);
		CvPoint p1 = cvPoint(img->width, img->height);
		dst = cvCloneImage(img);
		for(int i = 1 ; i <= W ; i++)
			for(int j = 1 ; j <= H; j++)
			{
				int w = (p1.x-p0.x)/W;
				int h = (p1.y-p0.y)/H;
				int X = p0.x + w*(i-1);
				int Y = p0.y + h*(j-1);
				cvSetImageROI(dst, cvRect(X, Y, w, h));
				CvScalar mean = cvAvg(dst);
				cvSet(dst, mean);
				cvResetImageROI(dst);
			}
	}
}
Example #26
int ChangeImgColor(IplImage *scr)
{
	CvScalar avgChannels = cvAvg(scr);
	double avgB = avgChannels.val[0];	// mean of the first channel (B)
	double avgG = avgChannels.val[1];	// mean of the second channel (G)
	double avgR = avgChannels.val[2];	// mean of the third channel (R)

	CvScalar idensity;
	int i = 0, j = 0;
	for (; i < scr->height; i++)
	{
		for (j = 0; j < scr->width; j++)
		{
			idensity = cvGet2D(scr, i, j);
			idensity.val[0] = idensity.val[0] - avgB + 19;	// shift each channel so its mean
			idensity.val[1] = idensity.val[1] - avgG + 79;	// moves to the target value
			idensity.val[2] = idensity.val[2] - avgR + 168;
			cvSet2D(scr, i, j, idensity);
		}
	}

	return 0;
}
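The per-pixel cvGet2D/cvSet2D loop above is correct but slow. Below is a sketch of the same shift done in one cvAddS call, using the same hard-coded targets (19, 79, 168); for 8-bit images cvAddS saturates at 0 and 255 just as cvSet2D does. The function name is an assumption.

int ChangeImgColorFast(IplImage *scr)
{
	CvScalar avg = cvAvg(scr);
	// shift each channel so its mean moves to the target value, in one pass
	cvAddS(scr, cvScalar(19 - avg.val[0],
	                     79 - avg.val[1],
	                     168 - avg.val[2], 0), scr);
	return 0;
}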
Example #27
File: dct.cpp Project: cherip/dct
IplImage *resize_image( IplImage *src, const int nSub ) {
    int subWidth = src->width / nSub;
    int subHeight = src->height / nSub;
    IplImage *subImage = cvCreateImage( cvSize( nSub, nSub ),
            IPL_DEPTH_32F, 1);
   
    for ( int i = 0; i < nSub; i++ ) {
        for ( int j = 0; j < nSub; j++ ) {
            int subY = i * subHeight;
            int subX = j * subWidth;

            cvSetImageROI( src, cvRect( subX, subY, subWidth, subHeight ) );
            CvScalar avg = cvAvg( src ); 
            set_pixval32f( subImage, j, i, avg.val[0] );
            cvResetImageROI( src );

      //      std::cout << avg.val[0] << " ";
        }
      //  std::cout << endl;
    }
    //std::cout << "----------------\n";

    return subImage;
}
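A hedged usage note: each output pixel of resize_image is the mean of one block of the source, so the call below yields an 8x8 float grid of block means, the kind of input an average- or DCT-based image hash starts from. The file name is illustrative.

static void demo_block_means(void)
{
	IplImage* gray  = cvLoadImage("photo.jpg", CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* means = resize_image(gray, 8);	// 8x8, IPL_DEPTH_32F
	cvReleaseImage(&gray);
	cvReleaseImage(&means);
}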
Example #28
bool BouyBaseObject::Initialize(const std::string& parameter)
{
    mType = parameter;
    if(!mXml.Load("settings/vision/visionobjects.xml","VisionObjects"))
    {
        return false;
    }
    Clear();
    std::string file;
    mXml.GetVar(parameter+"@debug",mDebug);
    if(mXml.GetVar(parameter+".Images.Near@file",file))
    {
        mNearImage = cvLoadImage(file.c_str());
        if(mNearImage)
        {
            mNearColor = cvAvg(mNearImage);
            mNearHist = VisionUtils::CreateHSVHist(mNearImage);
        }
    }
    if(mXml.GetVar(parameter+".Images.Far@file",file))
    {
        mFarImage = cvLoadImage(file.c_str());
        if(mFarImage)
        {
            //cvCvtColor(temp, convert, CV_BGR2YCrCb);
            mFarColor = cvAvg(mFarImage);
            mFarHist = VisionUtils::CreateHSVHist(mFarImage);
        }
    }
    if(mXml.GetVar(parameter+".Images.Template@file",file))
    {
        IplImage * temp = cvLoadImage(file.c_str());
        if(temp)
        {
            mTemplate = cvCreateImage( cvGetSize(temp), 8, 1 );
            cvConvertImage(temp,mTemplate);
            //cvThreshold(mBouyTemplate,mTemplate,200,255,CV_THRESH_BINARY);
            cvReleaseImage(&temp);
        }
    }
    bool enabled = false;
    if(mXml.GetVar(parameter+".Colors@enable",enabled))
    {
        if(enabled)
        {
            mXml.GetVar(parameter+".Colors.Near@r",mNearColor.val[0]);
            mXml.GetVar(parameter+".Colors.Near@g",mNearColor.val[1]);
            mXml.GetVar(parameter+".Colors.Near@b",mNearColor.val[2]);
            mXml.GetVar(parameter+".Colors.Far@r",mFarColor.val[0]);
            mXml.GetVar(parameter+".Colors.Far@g",mFarColor.val[1]);
            mXml.GetVar(parameter+".Colors.Far@b",mFarColor.val[2]);
        }
    }
    double val = 0;
    if(mXml.GetVar(parameter+".Options@mainthreshold",val))
    {
        mMainThreshold = val;
    }
    if(mXml.GetVar(parameter+".Options@templatethreshold",val))
    {
        mTemplateThreshold = val;
    }
    if(mXml.GetVar(parameter+".Options@minnoisesizepercent",val))
    {
        mMinNoiseSizePercent = val;
    }


    if(mXml.GetVar(parameter+".Options.HistMask@enable",enabled))
    {
        mEnableHist = enabled;
    }
    if(mXml.GetVar(parameter+".Options.ColorMask@enable",enabled))
    {
        mEnableColor = enabled;
    }
    if(mXml.GetVar(parameter+".Options.SegmentMask@enable",enabled))
    {
        mEnableSegment = enabled;
    }
    if(mXml.GetVar(parameter+".Options.GVColorMask@enable",enabled))
    {
        mEnableGVColor = enabled;
    }

    if(mXml.GetVar(parameter+".Options.HistMask@weight",val))
    {
        mHistWeight = val;
    }
    if(mXml.GetVar(parameter+".Options.ColorMask@weight",val))
    {
        mColorWeight = val;
    }
    if(mXml.GetVar(parameter+".Options.SegmentMask@weight",val))
    {
        mSegmentWeight = val;
    }
    if(mXml.GetVar(parameter+".Options.GVColorMask@weight",val))
    {
        mGVColorWeight= val;
    }
    if(mNearImage  == NULL || mFarImage == NULL || mTemplate == NULL) return false;
    return true;
}
Example #29
IplImage* BouyObject::SegmentationMask2(const IplImage * imgIn, IplImage* debugOut) const
{
//    CvSize imageSize = cvSize(imgIn->width & -2, imgIn->height & -2 );
//    IplImage* imgSmallCopy = cvCreateImage( cvSize(imageSize.width/2, imageSize.height/2), IPL_DEPTH_8U, 1 );


    IplImage * imgOut = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);
    IplImage * circleMask = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);
    IplImage * src = cvCloneImage(imgIn);
    IplImage * scratch = cvCloneImage(src);
    IplImage * hist = HistogramMask(imgIn);
    //IplImage * bestMask = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* comp = NULL;

    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, .5,.5);
    std::ostringstream s;

    cvZero(imgOut);
    cvZero(circleMask);
    cvZero(scratch);
    //cvZero(bestMask);

    CvScalar avgColor;
    double bestColor = -1;
    CvRect bestRect;
    double bestDiag = 0;
//    IplImage* hsv = cvCreateImage( cvGetSize(imgIn), 8, 3 );

//    IplImage * chan0 = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);
//    IplImage * segsum = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);

    //cvCvtColor( imgIn, hsv, CV_BGR2YCrCb );
    //cvCopyImage(imgIn,hsv);
    //cvSplit(hsv,chan0,chan1,chan2, NULL);
    //cvConvertImage(imgIn,src);

    //lower last param for more segments
    //cvPyrSegmentation( hsv, scratch, storage, &comp, 3, 100, 90 );


    cvPyrSegmentation( src, scratch, storage, &comp, 2, 0, 100);
    int n_comp = comp->total;

    std::list<CvBox2D> blobList;
    for( int i = n_comp-1; i>=1; i-- )
    {
        CvConnectedComp* cc = (CvConnectedComp*) cvGetSeqElem( comp, i );
        cvAbsDiffS(scratch,src,cc->value);
        cvNot(src,src);
        cvThreshold(src,src,254,255,CV_THRESH_BINARY);
        blobList = VisionUtils::GetBlobBoxes(src,0.0008,.95,false);
        for(std::list<CvBox2D>::iterator it = blobList.begin(); it != blobList.end(); it++)
        {
            CvRect rect = VisionUtils::ToCvRect(*it);
            VisionUtils::MakeSquare(rect);
            double diagonal = sqrt(rect.width * rect.width + rect.height * rect.height);
            cvDrawCircle(circleMask,cvPoint(rect.x+rect.width/2.,rect.y+rect.height/2),diagonal/2.5,CV_RGB(255,255,255),CV_FILLED);

            avgColor = cvAvg (hist,circleMask);
            if((bestColor < 0 || bestColor < avgColor.val[0]) && avgColor.val[0] > mSegment2Threshold)
            {
                bestDiag = diagonal;
                bestColor = avgColor.val[0];
                bestRect = rect;
                cvCopy(circleMask,imgOut);
            }
            //cvMinMaxLoc(imgIn,)
            cvZero(circleMask);
        }
    }
    if(debugOut && bestColor > 0)
    {
        s.str("");	// clear() only resets error flags; str("") actually empties the buffer
        s << "bestColor(" << bestColor << ") " << mType;
        cvPutText(debugOut,s.str().c_str(),cvPoint(bestRect.x+bestRect.width/2.,bestRect.y+bestRect.height/2),&font,CV_RGB(255,255,255));
        cvDrawCircle(debugOut,cvPoint(bestRect.x+bestRect.width/2.,bestRect.y+bestRect.height/2),bestDiag/2.5,CV_RGB(255,255,255));
    }
//    cvShowImage("best",bestMask);
//    cvWaitKey(0);

    //VisionUtils::ClearEdges(imgOut);
    cvReleaseImage(&scratch);
    cvReleaseImage(&src);
    cvReleaseImage(&hist);
    cvReleaseImage(&circleMask);
    cvReleaseMemStorage( &storage );
    return imgOut;
}
Example #30
bool BouyObject::Initialize(const std::string& parameter)
{
    mType = parameter;
    if(!mXml.Load("settings/vision/visionobjects.xml","VisionObjects"))
    {
        return false;
    }
    Clear();
    std::string file;
    mXml.GetVar(parameter+"@debug",mDebug);
    if(mXml.GetVar(parameter+".Images.Near@file",file))
    {
        mNearImage = cvLoadImage(file.c_str());
        if(mNearImage)
        {
            mNearColor = cvAvg(mNearImage);
            mNearHist = VisionUtils::CreateHSVHist(mNearImage);
        }
    }
    if(mXml.GetVar(parameter+".Images.Far@file",file))
    {
        mFarImage = cvLoadImage(file.c_str());
        if(mFarImage)
        {
            //cvCvtColor(temp, convert, CV_BGR2YCrCb);
            mFarColor = cvAvg(mFarImage);
            mFarHist = VisionUtils::CreateHSVHist(mFarImage);
        }
    }
    if(mXml.GetVar(parameter+".Images.Template@file",file))
    {
        IplImage * temp = cvLoadImage(file.c_str());
        if(temp)
        {
            mBouyTemplate = cvCreateImage( cvGetSize(temp), 8, 1 );
            cvConvertImage(temp,mBouyTemplate);
            //cvThreshold(mBouyTemplate,mBouyTemplate,200,255,CV_THRESH_BINARY);
            cvReleaseImage(&temp);
        }
    }
    bool enabled = false;
    if(mXml.GetVar(parameter+".Colors@enable",enabled))
    {
        if(enabled)
        {
            mXml.GetVar(parameter+".Colors.Near@r",mNearColor.val[0]);
            mXml.GetVar(parameter+".Colors.Near@g",mNearColor.val[1]);
            mXml.GetVar(parameter+".Colors.Near@b",mNearColor.val[2]);
            mXml.GetVar(parameter+".Colors.Far@r",mFarColor.val[0]);
            mXml.GetVar(parameter+".Colors.Far@g",mFarColor.val[1]);
            mXml.GetVar(parameter+".Colors.Far@b",mFarColor.val[2]);
        }
    }
    double val = 0;
    if(mXml.GetVar(parameter+".Options@mainthreshold",val))
    {
        mMainThreshold = val;
    }
    if(mXml.GetVar(parameter+".Options@templatethreshold",val))
    {
        mTemplateThreshold = val;
    }
    if(mXml.GetVar(parameter+".Options@minnoisesizepercent",val))
    {
        mMinNoiseSizePercent = val;
    }


    if(mXml.GetVar(parameter+".Options.HistMask@enable",enabled))
    {
        mEnableHist = enabled;
    }
    if(mXml.GetVar(parameter+".Options.ColorMask@enable",enabled))
    {
        mEnableColor = enabled;
    }
    if(mXml.GetVar(parameter+".Options.SegmentMask@enable",enabled))
    {
        mEnableSegment = enabled;
    }
    if(mXml.GetVar(parameter+".Options.SegmentMask2@enable",enabled))
    {
        mEnableSegment2 = enabled;
    }
    if(mXml.GetVar(parameter+".Options.GVColorMask@enable",enabled))
    {
        mEnableGVColor = enabled;
    }

    if(mXml.GetVar(parameter+".Options.HistMask@weight",val))
    {
        mHistWeight = val;
    }
    if(mXml.GetVar(parameter+".Options.ColorMask@weight",val))
    {
        mColorWeight = val;
    }
    if(mXml.GetVar(parameter+".Options.SegmentMask@weight",val))
    {
        mSegmentWeight = val;
    }
    if(mXml.GetVar(parameter+".Options.SegmentMask2@weight",val))
    {
        mSegmentWeight2 = val;
    }
    if(mXml.GetVar(parameter+".Options.SegmentMask2@segment2threshold",val))
    {
        mSegment2Threshold = val;
    }
    if(mXml.GetVar(parameter+".Options.GVColorMask@weight",val))
    {
        mGVColorWeight= val;
    }
    VisionObject::MaskOption maskoption;
    std::string s;
    if(mXml.GetVar(parameter+".Options.ColorSpaceMask1@space",s))
    {
        maskoption.mCvColorConversionName = s;
    }
    if(mXml.GetVar(parameter+".Options.ColorSpaceMask1@enable",enabled))
    {
        maskoption.mEnabledFlag = enabled;
    }
    if(mXml.GetVar(parameter+".Options.ColorSpaceMask1@weight",val))
    {
        maskoption.mWeight = val;
    }
    if(mXml.GetVar(parameter+".Options.ColorSpaceMask1@invert",enabled))
    {
        maskoption.mInvertFlag = enabled;
    }
    if(mXml.GetVar(parameter+".Options.ColorSpaceMask1@channel",val))
    {
        maskoption.mChannelIndex = val;
        mMaskOptions.push_back(maskoption);
    }

    if(mXml.GetVar(parameter+".Options.ColorSpaceMask2@space",s))
    {
        maskoption.mCvColorConversionName = s;
    }
    if(mXml.GetVar(parameter+".Options.ColorSpaceMask2@enable",enabled))
    {
        maskoption.mEnabledFlag = enabled;
    }
    if(mXml.GetVar(parameter+".Options.ColorSpaceMask2@weight",val))
    {
        maskoption.mWeight = val;
    }
    if(mXml.GetVar(parameter+".Options.ColorSpaceMask2@invert",enabled))
    {
        maskoption.mInvertFlag = enabled;
    }
    if(mXml.GetVar(parameter+".Options.ColorSpaceMask2@channel",val))
    {
        maskoption.mChannelIndex = val;
        mMaskOptions.push_back(maskoption);
    }
    std::string tag;
    if(mXml.GetVar(parameter+".NearTarget@tag",tag))
    {
        mNearTarget.Setup(mXml,tag,parameter+".NearTarget");
    }
    if(mXml.GetVar(parameter+".FarTarget@tag",tag))
    {
        mFarTarget.Setup(mXml,tag,parameter+".FarTarget");
    }
    if(mNearImage  == NULL || mFarImage == NULL || mBouyTemplate == NULL) return false;
    return true;
}