void cv::ocl::oclMat::upload(const Mat &m)
{
    CV_DbgAssert(!m.empty());
    Size wholeSize;
    Point ofs;
    m.locateROI(wholeSize, ofs);
    int type = m.type();
    //if(m.channels() == 3)
    //type = CV_MAKETYPE(m.depth(), 4);
    create(wholeSize, type);

    //if(m.channels() == 3)
    //{
    //int pitch = GPU_MATRIX_MALLOC_STEP(wholeSize.width * 3 * m.elemSize1());
    //int err;
    //cl_mem temp = clCreateBuffer(clCxt->clContext,CL_MEM_READ_WRITE,
    //pitch*wholeSize.height,0,&err);
    //CV_DbgAssert(err==0);

    //openCLMemcpy2D(clCxt,temp,pitch,m.datastart,m.step,wholeSize.width*m.elemSize(),wholeSize.height,clMemcpyHostToDevice);
    //convert_C3C4(temp, *this, pitch);
    //}
    //else
    openCLMemcpy2D(clCxt, data, step, m.datastart, m.step, wholeSize.width * elemSize(), wholeSize.height, clMemcpyHostToDevice);

    rows = m.rows;
    cols = m.cols;
    offset = ofs.y * step + ofs.x * elemSize();
    download_channels = m.channels();
}
void cv::ocl::oclMat::upload(const Mat &m)
{
    if (!Context::getContext()->supportsFeature(FEATURE_CL_DOUBLE) && m.depth() == CV_64F)
    {
        CV_Error(Error::OpenCLDoubleNotSupported, "Selected device doesn't support double");
        return;
    }

    CV_DbgAssert(!m.empty());
    Size wholeSize;
    Point ofs;
    m.locateROI(wholeSize, ofs);
    create(wholeSize, m.type());

    if (m.channels() == 3)
    {
        // 3-channel data is first copied into a tightly packed temporary buffer,
        // then expanded to the 4-channel device storage by convert_C3C4() below.
        int pitch = wholeSize.width * 3 * m.elemSize1();
        int tail_padding = m.elemSize1() * 3072;
        int err;
        // packed buffer size, rounded up to a multiple of tail_padding
        cl_mem temp = clCreateBuffer(*(cl_context*)clCxt->getOpenCLContextPtr(), CL_MEM_READ_WRITE,
                                     (pitch * wholeSize.height + tail_padding - 1) / tail_padding * tail_padding, 0, &err);
        openCLVerifyCall(err);

        openCLMemcpy2D(clCxt, temp, pitch, m.datastart, m.step, wholeSize.width * m.elemSize(), wholeSize.height, clMemcpyHostToDevice, 3);
        convert_C3C4(temp, *this);
        openCLSafeCall(clReleaseMemObject(temp));
    }
    else
        openCLMemcpy2D(clCxt, data, step, m.datastart, m.step, wholeSize.width * elemSize(), wholeSize.height, clMemcpyHostToDevice);

    rows = m.rows;
    cols = m.cols;
    offset = ofs.y * step + ofs.x * elemSize();
}
void cv::ocl::oclMat::upload(const Mat &m)
{
    CV_DbgAssert(!m.empty());
    Size wholeSize;
    Point ofs;
    m.locateROI(wholeSize, ofs);
    //   int type = m.type();
    //   if(m.oclchannels() == 3)
    //{
    //	type = CV_MAKETYPE(m.depth(), 4);
    //}
    create(wholeSize, m.type());

    if(m.channels() == 3)
    {
        int pitch = wholeSize.width * 3 * m.elemSize1();
        int tail_padding = m.elemSize1() * 3072;
        int err;
        cl_mem temp = clCreateBuffer((cl_context)clCxt->oclContext(), CL_MEM_READ_WRITE,
                                     (pitch * wholeSize.height + tail_padding - 1) / tail_padding * tail_padding, 0, &err);
        openCLVerifyCall(err);

        openCLMemcpy2D(clCxt, temp, pitch, m.datastart, m.step, wholeSize.width * m.elemSize(), wholeSize.height, clMemcpyHostToDevice, 3);
        convert_C3C4(temp, *this);
        //int* cputemp=new int[wholeSize.height*wholeSize.width * 3];
        //int* cpudata=new int[this->step*this->wholerows/sizeof(int)];
        //openCLSafeCall(clEnqueueReadBuffer(clCxt->impl->clCmdQueue, temp, CL_TRUE,
        //						0, wholeSize.height*wholeSize.width * 3* sizeof(int), cputemp, 0, NULL, NULL));
        //openCLSafeCall(clEnqueueReadBuffer(clCxt->impl->clCmdQueue, (cl_mem)data, CL_TRUE,
        //						0, this->step*this->wholerows, cpudata, 0, NULL, NULL));
        //for(int i=0;i<wholeSize.height;i++)
        //{
        //	int *a = cputemp+i*wholeSize.width * 3,*b = cpudata + i*this->step/sizeof(int);
        //	for(int j=0;j<wholeSize.width;j++)
        //	{
        //		if((a[3*j] != b[4*j])||(a[3*j+1] != b[4*j+1])||(a[3*j+2] != b[4*j+2]))
        //			printf("rows=%d,cols=%d,cputtemp=%d,%d,%d;cpudata=%d,%d,%d\n",
        //			i,j,a[3*j],a[3*j+1],a[3*j+2],b[4*j],b[4*j+1],b[4*j+2]);
        //	}
        //}
        //delete []cputemp;
        //delete []cpudata;
        openCLSafeCall(clReleaseMemObject(temp));
    }
    else
    {
        openCLMemcpy2D(clCxt, data, step, m.datastart, m.step, wholeSize.width * elemSize(), wholeSize.height, clMemcpyHostToDevice);
    }

    rows = m.rows;
    cols = m.cols;
    offset = ofs.y * step + ofs.x * elemSize();
    //download_channels = m.channels();
}
Example #4
void write_hdf5_image(H5File h5f, const char *name, const Mat &im)
{
    DSetCreatPropList cparms;
    hsize_t chunk_dims[2] = {256, 256};
    hsize_t dims[2];
    dims[0] = im.size().height;
    dims[1] = im.size().width;
  
    if (chunk_dims[0] > dims[0]) {
        chunk_dims[0] = dims[0];
    }
    if (chunk_dims[1] > dims[1]) {
        chunk_dims[1] = dims[1];
    }

    cparms.setChunk(2, chunk_dims);
    cparms.setShuffle();
    cparms.setDeflate(5);

    DataSet dataset = h5f.createDataSet(name, PredType::NATIVE_FLOAT,
                                        DataSpace(2, dims, dims),
                                        cparms);

    Mat image;
    if (im.type() !=  CV_32F)
        im.convertTo(image, CV_32F);
    else
        image = im;
    
    DataSpace imspace;
    float *imdata;
    if (image.isContinuous()) {
        imspace = dataset.getSpace(); // same size as the dataset we just created
        imspace.selectAll();
        imdata = image.ptr<float>();
    } else {
        // we are working with an ROI
        assert (image.isSubmatrix());
        Size parent_size; Point parent_ofs;
        image.locateROI(parent_size, parent_ofs);
        hsize_t parent_count[2];
        parent_count[0] = parent_size.height; parent_count[1] = parent_size.width;
        imspace.setExtentSimple(2, parent_count);
        hsize_t im_offset[2], im_size[2];
        im_offset[0] = parent_ofs.y; im_offset[1] = parent_ofs.x;
        im_size[0] = image.size().height; im_size[1] = image.size().width;
        imspace.selectHyperslab(H5S_SELECT_SET, im_size, im_offset);
        imdata = image.ptr<float>() - parent_ofs.x - parent_ofs.y * parent_size.width;
    }
    dataset.write(imdata, PredType::NATIVE_FLOAT, imspace);
}
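A minimal usage sketch for the writer above (not from the original source): it assumes the function is compiled in the same translation unit, together with the using namespace cv / using namespace H5 directives that the unqualified Mat, H5File and PredType names imply; the file name and dataset name are made up.

#include <opencv2/core/core.hpp>
#include "H5Cpp.h"

int main()
{
    // any single-channel image; the writer converts it to CV_32F if needed
    cv::Mat img = cv::Mat::eye(512, 512, CV_32F);

    // create (or truncate) an HDF5 file and store the image as a chunked,
    // shuffled, deflate-compressed 2D float dataset
    H5::H5File h5f("example.h5", H5F_ACC_TRUNC);
    write_hdf5_image(h5f, "probabilities", img);
    return 0;
}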
Example #5
/**
 * OrientImage - Readjust image orientation to be correctly upright
 */
void ImageReader::orientImage( cv::Mat &examImage, cv::Point2f &UL, cv::Point2f &UR, 
	cv::Point2f &LL, cv::Point2f &LR, float &widthRatio, float &heightRatio ) {
	// Set up lengths and widths
	float vLength = sqrt( pow( abs(LL.x - UL.x), 2 ) + pow( abs(LL.y - UL.y), 2 ) );
	float hLength = sqrt( pow( abs(UR.x - UL.x), 2 ) + pow( abs(UR.y - UL.y), 2 ) );
	// handle rotation issue (if on side - vertical length "less" than horizontal)
	if( vLength < hLength ) {
		float vTemp = vLength;
		vLength = hLength;
		hLength = vTemp;
	} 
	// Locates the ROI offset for its ul
	Point2f srcQuad[4], dstQuad[4];
	vector< Point2f > eRectPoints(4);
	eRectPoints[0] = UL;
	eRectPoints[1] = UR;
	eRectPoints[2] = LL;
	eRectPoints[3] = LR;
	Rect examRegion = minAreaRect( eRectPoints ).boundingRect();
	Size size;
	Point ofs;
	Mat examCopy = examImage( examRegion );
	examCopy.locateROI( size, ofs );
	// Set up points for the "source" array
	srcQuad[0] = Point2f( ofs.x + abs(UL.x - ofs.x), abs( UL.y - ofs.y ) );
	srcQuad[1] = Point2f( srcQuad[0].x + hLength, srcQuad[0].y );
	srcQuad[2] = Point2f( srcQuad[0].x, srcQuad[0].y + vLength );
	srcQuad[3] = Point2f( srcQuad[0].x + hLength, srcQuad[0].y + vLength );
	// Set up points for the "destination" array
	dstQuad[ 0 ] = UL;
	dstQuad[ 1 ] = UR;
	dstQuad[ 2 ] = LL;
	dstQuad[ 3 ] = LR;
	// Calc perspective transform, prepare the clone, & warp
	Mat warp_matrix = getPerspectiveTransform( srcQuad, dstQuad );
	warpPerspective( examImage.clone(), examImage, warp_matrix,
	 Size(examImage.cols * 1.5f, examImage.rows * 1.5f), WARP_INVERSE_MAP );
	// Resize examImage to only have coordinate values
	Rect rect( srcQuad[0], srcQuad[3] );
	Mat fittedImage = examImage( rect );
	examImage = fittedImage;
	// Recalculates size ratios
	widthRatio = examImage.cols / (mainUR.x - mainUL.x);
	heightRatio = examImage.rows / (mainLL.y - mainUL.y);
	// Reassign points
	UL = Point2f( 0, 0 );
	UR = Point2f( hLength, 0 );
	LL = Point2f( 0, vLength );
	LR = Point2f( hLength, vLength );
}
Example #6
void cv::ocl::oclMat::upload(const Mat &m)
{
    CV_DbgAssert(!m.empty());
    Size wholeSize;
    Point ofs;
    m.locateROI(wholeSize, ofs);
    if(m.channels() == 3)
    {
        create(wholeSize, m.type());
        int pitch = wholeSize.width * 3 * m.elemSize1();
        int tail_padding = m.elemSize1() * 3072;
        int err;
        cl_mem temp;
        if(gDeviceMemType!=DEVICE_MEM_UHP && gDeviceMemType!=DEVICE_MEM_CHP){
            temp = clCreateBuffer((cl_context)clCxt->oclContext(), CL_MEM_READ_WRITE,
                                  (pitch * wholeSize.height + tail_padding - 1) / tail_padding * tail_padding, 0, &err);
            openCLVerifyCall(err);
            openCLMemcpy2D(clCxt, temp, pitch, m.datastart, m.step,
                           wholeSize.width * m.elemSize(), wholeSize.height, clMemcpyHostToDevice, 3);
        }
        else{
            temp = clCreateBuffer((cl_context)clCxt->oclContext(), CL_MEM_READ_WRITE|CL_MEM_USE_HOST_PTR,
                                  (pitch * wholeSize.height + tail_padding - 1) / tail_padding * tail_padding, m.datastart, &err);
            openCLVerifyCall(err);
        }


        convert_C3C4(temp, *this);
        openCLSafeCall(clReleaseMemObject(temp));
    }
    else
    {
        // try to use host ptr
        createEx(wholeSize, m.type(), gDeviceMemRW, gDeviceMemType, m.datastart);
        if(gDeviceMemType!=DEVICE_MEM_UHP && gDeviceMemType!=DEVICE_MEM_CHP)
            openCLMemcpy2D(clCxt, data, step, m.datastart, m.step,
                           wholeSize.width * elemSize(), wholeSize.height, clMemcpyHostToDevice);
    }

    rows = m.rows;
    cols = m.cols;
    offset = ofs.y * step + ofs.x * elemSize();
}
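For context, a hypothetical caller of the legacy cv::ocl module (OpenCV 2.4.x) that these upload() variants belong to; the variable names are illustrative. As the convert_C3C4 path above shows, a 3-channel host Mat is stored as 4 channels on the device, which stays transparent to the caller.

#include <opencv2/core/core.hpp>
#include <opencv2/ocl/ocl.hpp>

void demoUpload(const cv::Mat &bgrFrame)        // e.g. a CV_8UC3 camera frame
{
    cv::ocl::oclMat d_img;
    d_img.upload(bgrFrame);                     // host -> device copy; the ROI offset is preserved

    cv::Mat roundTrip;
    d_img.download(roundTrip);                  // device -> host copy back
    CV_Assert(roundTrip.size() == bgrFrame.size() &&
              roundTrip.type() == bgrFrame.type());
}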
Example #7
void read_hdf5_image(H5File h5f, Mat &image_out, const char *name, const Rect &roi=Rect(0,0,0,0))
{
    DataSet dataset = h5f.openDataSet(name);
    DataSpace dspace = dataset.getSpace();
    assert (dspace.getSimpleExtentNdims() == 2);
    hsize_t dims[2];
    dspace.getSimpleExtentDims(dims);
    if ((roi.width == 0) && (roi.height == 0)) {
        image_out.create(dims[0], dims[1], CV_32F);
        dspace.selectAll();
    } else {
        image_out.create(roi.height, roi.width, CV_32F);
        hsize_t _offset[2], _size[2];
        _offset[0] = roi.y; _offset[1] = roi.x;
        _size[0] = roi.height; _size[1] = roi.width;
        dspace.selectHyperslab(H5S_SELECT_SET, _size, _offset);
    }
    
    DataSpace imspace;
    float *imdata;
    if (image_out.isContinuous()) {
        dims[0] = image_out.size().height; dims[1] = image_out.size().width;
        imspace = DataSpace(2, dims);
        imspace.selectAll();
        imdata = image_out.ptr<float>();
    } else {
        // we are working with an ROI
        assert (image_out.isSubmatrix());
        Size parent_size; Point parent_ofs;
        image_out.locateROI(parent_size, parent_ofs);
        hsize_t parent_count[2];
        parent_count[0] = parent_size.height; parent_count[1] = parent_size.width;
        imspace.setExtentSimple(2, parent_count);
        hsize_t im_offset[2], im_size[2];
        im_offset[0] = parent_ofs.y; im_offset[1] = parent_ofs.x;
        im_size[0] = image_out.size().height; im_size[1] = image_out.size().width;
        imspace.selectHyperslab(H5S_SELECT_SET, im_size, im_offset);
        imdata = image_out.ptr<float>() - parent_ofs.x - parent_ofs.y * parent_size.width;
    }
    dataset.read(imdata, PredType::NATIVE_FLOAT, imspace, dspace);
}
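A matching hypothetical read-back sketch, again assuming the helper above is available in the same translation unit; the dataset name and window are made up for illustration.

void demoRead(H5::H5File &h5f)
{
    // read the whole dataset into a freshly allocated CV_32F matrix
    cv::Mat full;
    read_hdf5_image(h5f, full, "probabilities");

    // read only a 100x100 window whose top-left corner is at (x=32, y=16)
    cv::Mat window;
    read_hdf5_image(h5f, window, "probabilities", cv::Rect(32, 16, 100, 100));
}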
Example #8
void write_feature(H5File h5f, const Mat &image_in, const char *name)
{
    // make sure the sizes match
    assert (imsize == image_in.size());

    // make sure the image is in native float
    Mat image;
    if (image_in.type() !=  CV_32F)
        image_in.convertTo(image, CV_32F);
    else
        image = image_in;
    
    DataSet dataset = create_dataset(h5f, name);

    DataSpace imspace;
    float *imdata;
    if (image.isContinuous()) {
        imspace = dataset.getSpace(); // same size as the dataset we just created
        imspace.selectAll();
        imdata = image.ptr<float>();
    } else {
        // we are working with an ROI
        assert (image.isSubmatrix());
        Size parent_size; Point parent_ofs;
        image.locateROI(parent_size, parent_ofs);
        hsize_t parent_count[2];
        parent_count[0] = parent_size.height; parent_count[1] = parent_size.width;
        imspace.setExtentSimple(2, parent_count);
        hsize_t im_offset[2], im_size[2];
        im_offset[0] = parent_ofs.y; im_offset[1] = parent_ofs.x;
        im_size[0] = image.size().height; im_size[1] = image.size().width;
        imspace.selectHyperslab(H5S_SELECT_SET, im_size, im_offset);
        imdata = image.ptr<float>() - parent_ofs.x - parent_ofs.y * parent_size.width;
    }
    dataset.write(imdata, PredType::NATIVE_FLOAT, imspace);
}
Example #9
void crossCorr( const Mat& img, const Mat& _templ, Mat& corr,
                Size corrsize, int ctype,
                Point anchor, double delta, int borderType )
{
    const double blockScale = 4.5;
    const int minBlockSize = 256;
    std::vector<uchar> buf;

    Mat templ = _templ;
    int depth = img.depth(), cn = img.channels();
    int tdepth = templ.depth(), tcn = templ.channels();
    int cdepth = CV_MAT_DEPTH(ctype), ccn = CV_MAT_CN(ctype);

    CV_Assert( img.dims <= 2 && templ.dims <= 2 && corr.dims <= 2 );

    if( depth != tdepth && tdepth != std::max(CV_32F, depth) )
    {
        _templ.convertTo(templ, std::max(CV_32F, depth));
        tdepth = templ.depth();
    }

    CV_Assert( depth == tdepth || tdepth == CV_32F);
    CV_Assert( corrsize.height <= img.rows + templ.rows - 1 &&
               corrsize.width <= img.cols + templ.cols - 1 );

    CV_Assert( ccn == 1 || delta == 0 );

    corr.create(corrsize, ctype);

    int maxDepth = depth > CV_8S ? CV_64F : std::max(std::max(CV_32F, tdepth), cdepth);
    Size blocksize, dftsize;

    blocksize.width = cvRound(templ.cols*blockScale);
    blocksize.width = std::max( blocksize.width, minBlockSize - templ.cols + 1 );
    blocksize.width = std::min( blocksize.width, corr.cols );
    blocksize.height = cvRound(templ.rows*blockScale);
    blocksize.height = std::max( blocksize.height, minBlockSize - templ.rows + 1 );
    blocksize.height = std::min( blocksize.height, corr.rows );

    dftsize.width = std::max(getOptimalDFTSize(blocksize.width + templ.cols - 1), 2);
    dftsize.height = getOptimalDFTSize(blocksize.height + templ.rows - 1);
    if( dftsize.width <= 0 || dftsize.height <= 0 )
        CV_Error( CV_StsOutOfRange, "the input arrays are too big" );

    // recompute block size
    blocksize.width = dftsize.width - templ.cols + 1;
    blocksize.width = MIN( blocksize.width, corr.cols );
    blocksize.height = dftsize.height - templ.rows + 1;
    blocksize.height = MIN( blocksize.height, corr.rows );

    Mat dftTempl( dftsize.height*tcn, dftsize.width, maxDepth );
    Mat dftImg( dftsize, maxDepth );

    int i, k, bufSize = 0;
    if( tcn > 1 && tdepth != maxDepth )
        bufSize = templ.cols*templ.rows*CV_ELEM_SIZE(tdepth);

    if( cn > 1 && depth != maxDepth )
        bufSize = std::max( bufSize, (blocksize.width + templ.cols - 1)*
            (blocksize.height + templ.rows - 1)*CV_ELEM_SIZE(depth));

    if( (ccn > 1 || cn > 1) && cdepth != maxDepth )
        bufSize = std::max( bufSize, blocksize.width*blocksize.height*CV_ELEM_SIZE(cdepth));

    buf.resize(bufSize);

    // compute DFT of each template plane
    for( k = 0; k < tcn; k++ )
    {
        int yofs = k*dftsize.height;
        Mat src = templ;
        Mat dst(dftTempl, Rect(0, yofs, dftsize.width, dftsize.height));
        Mat dst1(dftTempl, Rect(0, yofs, templ.cols, templ.rows));

        if( tcn > 1 )
        {
            src = tdepth == maxDepth ? dst1 : Mat(templ.size(), tdepth, &buf[0]);
            int pairs[] = {k, 0};
            mixChannels(&templ, 1, &src, 1, pairs, 1);
        }

        if( dst1.data != src.data )
            src.convertTo(dst1, dst1.depth());

        if( dst.cols > templ.cols )
        {
            Mat part(dst, Range(0, templ.rows), Range(templ.cols, dst.cols));
            part = Scalar::all(0);
        }
        dft(dst, dst, 0, templ.rows);
    }

    int tileCountX = (corr.cols + blocksize.width - 1)/blocksize.width;
    int tileCountY = (corr.rows + blocksize.height - 1)/blocksize.height;
    int tileCount = tileCountX * tileCountY;

    Size wholeSize = img.size();
    Point roiofs(0,0);
    Mat img0 = img;

    if( !(borderType & BORDER_ISOLATED) )
    {
        img.locateROI(wholeSize, roiofs);
        img0.adjustROI(roiofs.y, wholeSize.height-img.rows-roiofs.y,
                       roiofs.x, wholeSize.width-img.cols-roiofs.x);
    }
    borderType |= BORDER_ISOLATED;

    // calculate correlation by blocks
    for( i = 0; i < tileCount; i++ )
    {
        int x = (i%tileCountX)*blocksize.width;
        int y = (i/tileCountX)*blocksize.height;

        Size bsz(std::min(blocksize.width, corr.cols - x),
                 std::min(blocksize.height, corr.rows - y));
        Size dsz(bsz.width + templ.cols - 1, bsz.height + templ.rows - 1);
        int x0 = x - anchor.x + roiofs.x, y0 = y - anchor.y + roiofs.y;
        int x1 = std::max(0, x0), y1 = std::max(0, y0);
        int x2 = std::min(img0.cols, x0 + dsz.width);
        int y2 = std::min(img0.rows, y0 + dsz.height);
        Mat src0(img0, Range(y1, y2), Range(x1, x2));
        Mat dst(dftImg, Rect(0, 0, dsz.width, dsz.height));
        Mat dst1(dftImg, Rect(x1-x0, y1-y0, x2-x1, y2-y1));
        Mat cdst(corr, Rect(x, y, bsz.width, bsz.height));

        for( k = 0; k < cn; k++ )
        {
            Mat src = src0;
            dftImg = Scalar::all(0);

            if( cn > 1 )
            {
                src = depth == maxDepth ? dst1 : Mat(y2-y1, x2-x1, depth, &buf[0]);
                int pairs[] = {k, 0};
                mixChannels(&src0, 1, &src, 1, pairs, 1);
            }

            if( dst1.data != src.data )
                src.convertTo(dst1, dst1.depth());

            if( x2 - x1 < dsz.width || y2 - y1 < dsz.height )
                copyMakeBorder(dst1, dst, y1-y0, dst.rows-dst1.rows-(y1-y0),
                               x1-x0, dst.cols-dst1.cols-(x1-x0), borderType);

            dft( dftImg, dftImg, 0, dsz.height );
            Mat dftTempl1(dftTempl, Rect(0, tcn > 1 ? k*dftsize.height : 0,
                                         dftsize.width, dftsize.height));
            mulSpectrums(dftImg, dftTempl1, dftImg, 0, true);
            dft( dftImg, dftImg, DFT_INVERSE + DFT_SCALE, bsz.height );

            src = dftImg(Rect(0, 0, bsz.width, bsz.height));

            if( ccn > 1 )
            {
                if( cdepth != maxDepth )
                {
                    Mat plane(bsz, cdepth, &buf[0]);
                    src.convertTo(plane, cdepth, 1, delta);
                    src = plane;
                }
                int pairs[] = {0, k};
                mixChannels(&src, 1, &cdst, 1, pairs, 1);
            }
            else
            {
                if( k == 0 )
                    src.convertTo(cdst, cdepth, 1, delta);
                else
                {
                    if( maxDepth != cdepth )
                    {
                        Mat plane(bsz, cdepth, &buf[0]);
                        src.convertTo(plane, cdepth);
                        src = plane;
                    }
                    add(src, cdst, cdst);
                }
            }
        }
    }
}
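crossCorr() is essentially the DFT-based block correlation behind cv::matchTemplate; a hypothetical standalone call could look like the sketch below (function name, border mode and types are illustrative choices, not taken from the original source).

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

void demoCrossCorr(const cv::Mat &img, const cv::Mat &templ)
{
    CV_Assert(img.type() == CV_8UC1 && templ.type() == CV_8UC1);
    CV_Assert(img.cols >= templ.cols && img.rows >= templ.rows);

    // one correlation value per valid template position
    cv::Size corrsize(img.cols - templ.cols + 1, img.rows - templ.rows + 1);
    cv::Mat corr;

    // anchor at the template's top-left corner, no offset added to the result,
    // pixels outside the image ROI replicated
    crossCorr(img, templ, corr, corrsize, CV_32FC1,
              cv::Point(0, 0), 0, cv::BORDER_REPLICATE);
}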
Example #10
void TrackerCvBlobsLib::trackBlobs(const Mat &mat, const Mat &areaMask, bool history, std::vector<Area> *pareas)
{
	double min_area = *m_pmin_area;
	double max_area = *m_pmax_area;
	double max_radius_2 = *m_pmax_radius * *m_pmax_radius;
	double x, y, min_x, min_y, max_x, max_y;
	double min_depth,max_depth;
	Scalar sdepth, sstddev;
	cBlob temp;
	bool new_hand(true);
	int mat_area(mat.size().width*mat.size().height);

	// we will convert the matrix object passed from our cFilter class to an object of type IplImage for calling the CBlobResult constructor
	IplImage img;
	IplImage areaImg;

	// storage of the current blobs and the blobs from the previous frame
	Size s;	Point p;
	mat.locateROI(s,p);
	areaImg = areaMask;

	// convert our OpenCV matrix object to one of type IplImage
	img = mat;

	// cvblobslib blob extraction
	blob_result = CBlobResult(&img, NULL, 1/*img∈{0,255}->thresh unimportant*/, false);
	blob_result.Filter(blob_result, B_EXCLUDE, CBlobGetArea(), B_LESS, min_area); 
	blob_result.Filter(blob_result, B_EXCLUDE, CBlobGetArea(), B_GREATER, max_area);

	// clear the blobs from two frames ago
	blobs_previous.clear();
	
	// before we populate the blobs vector with the current frame, we need to store the live blobs in blobs_previous
	for (int i = 0; i < blobs.size(); i++)
		if (blobs[i].event != BLOB_UP)
			blobs_previous.push_back(blobs[i]);


	// populate the blobs vector with the current frame
	blobs.clear();
	for (int i = 0; i < blob_result.GetNumBlobs(); i++) {
		current_blob = blob_result.GetBlob(i);

		x     = XCenter(current_blob)/*+m_pSettingKinect->m_kinectProp.roi.x*/;
		y     = YCenter(current_blob)/*+m_pSettingKinect->m_kinectProp.roi.y*/;

//		temp.areaid = areaMask.at<uchar>((int)x+p.x,(int)y+p.y); // does not work
		temp.areaid = (uchar) areaImg.imageData[ ((int)x+p.x) + ((int)y+p.y)*areaMask.size().width]; // works
		if( temp.areaid == 0 ) continue;

		min_x = MinX(current_blob);
		min_y = MinY(current_blob);
		max_x = MaxX(current_blob);
		max_y = MaxY(current_blob);

		if( (max_x-min_x)*(max_y-min_y) > 0.9*mat_area) continue;// fix blob detection issue?!

		temp.location.x = temp.origin.x = x;
		temp.location.y = temp.origin.y = y;
		temp.min.x = min_x; temp.min.y = min_y;
		temp.max.x = max_x; temp.max.y = max_y;

		//Rect r(min_x+p.x,min_y+p.x, max_x-min_y, max_y-min_y);
		//Rect r(min_x,min_y, max_x-min_x, max_y-min_y);//width, height +1?!
		Rect r( x-3, y-3, min(7,max_x-min_x), min(7, max_y-min_y));


		//z = mean( mat(r), mat(r) )[0];/* mean is not good. The blob can include many pixel behind the frame depth*/

		/* Depth detection. The measurement method is flexible. */
		if( m_pSettingKinect->m_kinectProp.areaThresh ){
			/* Mean is ok, because all pixels of the blob are in front of the frame. */
			max_depth = mean( mat(r), mat(r) )[0]+4;/*correct blur(1) and area thresh shift (3)*/
			//meanStdDev( mat(r), sdepth, sstddev, mat(r) );
			//max_depth = sdepth[0]+3*sstddev[0];
			//minMaxLoc( mat(r), &min_depth, &max_depth, NULL, NULL, mat(r) );

		}else	if( pareas != NULL){
			/* Remove values behind the area depth and take the mean of the rest.
				This is problematic/choppy if too many pixels are removed.
			*/
			max_depth = max( (*pareas)[temp.areaid-1].depth-22, 
					mean( mat(r), mat(r)>(*pareas)[temp.areaid-1].depth-2 )[0] + 1);

		}else{
			/* Very little information. Use the maximum of the blob (choppy).
			 * Could be improved by using the mean of e.g. the 10 largest values;
			 * minMaxLoc requires filtered/blurred images.
			 * */
			//max_depth = 0;
			minMaxLoc( mat(r), &min_depth, &max_depth, NULL, NULL, mat(r) );
		}
		//printf("Compared depth of area/blob: %i %f\n",(*pareas)[temp.areaid-1].depth ,max_depth);

		/* Compare depth of hand with depth of area and throw blob away if hand to far away. */
		if(pareas != NULL && max_depth - (*pareas)[temp.areaid-1].depth < -1 ){
			//printf("Hand not reached area depth.\n");
			continue ;
		}

		temp.location.z = temp.origin.z = max_depth;

		blobs.push_back(temp);
	}

	// initialize previous blobs to untracked
	float d1,d2;
	for (int i = 0; i < blobs_previous.size(); i++) blobs_previous[i].tracked = false;

	// main tracking loop -- O(n^2) -- simply looks for a blob in the previous frame within a specified radius
	for (int i = 0; i < blobs.size(); i++) {
		cBlob &blobi = blobs[i];
		new_hand = true;
		for (int j = 0; j < blobs_previous.size(); j++) {
			if (blobs_previous[j].tracked) continue;

			d1=blobs[i].location.x - blobs_previous[j].location.x;
			d2=blobs[i].location.y - blobs_previous[j].location.y;
			if (blobs[i].areaid == blobs_previous[j].areaid 
					&& (d1*d1 + d2*d2) < max_radius_2) {
				blobs_previous[j].tracked = true;
				blobs[i].event = BLOB_MOVE;
				blobs[i].origin.x = history ? blobs_previous[j].origin.x : blobs_previous[j].location.x;
				blobs[i].origin.y = history ? blobs_previous[j].origin.y : blobs_previous[j].location.y;
				blobs[i].origin.z = history ? blobs_previous[j].origin.z : blobs_previous[j].location.z;

				blobs[i].handid = blobs_previous[j].handid;
				blobs[i].cursor = blobs_previous[j].cursor;
				blobs[i].cursor25D = blobs_previous[j].cursor25D;
				new_hand = false;
				break;
			}
		}
		/* assign a free handid if this is a new blob */
		if( new_hand){

			//search next free id.
			int next_handid = (last_handid+1) % MAXHANDS;//or = 0;
			while( handids[next_handid]==true && next_handid!=last_handid ){
					next_handid = (next_handid+1) % MAXHANDS;
			} //if array full -> next_handid = last_handid

			blobs[i].event = BLOB_DOWN;
			blobs[i].handid = next_handid;
			blobs[i].cursor = NULL;
			blobs[i].cursor25D = NULL;

			handids[next_handid] = true;
			last_handid = next_handid;
		}
	}

	// add any blobs from the previous frame that weren't tracked as having been removed
	for (int i = 0; i < blobs_previous.size(); i++) {
		if (!blobs_previous[i].tracked) {
			//free handid
			handids[blobs_previous[i].handid] = false;
			blobs_previous[i].event = BLOB_UP;
			blobs.push_back(blobs_previous[i]);
		}
	}
/*
	for (int i = 1; i < blob_result.GetNumBlobs(); i++) {
				current_blob = blob_result.GetBlob(i);
				printf("Blobcoordsd %f, %f\n", XCenter(current_blob), YCenter(current_blob) );
	}
*/
	int counter = 0;
	cBlob tb;
	for (int i = 0; i < blobs.size(); i++) {
			if( blobs[i].event != BLOB_UP ){
				counter++;
				tb = blobs[i];
				//printf("Blobcoordsd %f, %f\n", blobs[i].location.x, blobs[i].location.y );
				//printf("Blob areaid: %i, handid: %i, (%f,%f)\n", blobs[i].areaid, blobs[i].handid, blobs[i].location.x, blobs[i].location.y );
				if(! *m_pnotDrawBlob ){
					cvLine(&img,
							Point((int)tb.origin.x,(int)tb.origin.y),
							Point((int)tb.location.x,(int)tb.location.y),Scalar(244),2);
					cvRectangle(&img,
							Point((int)tb.min.x,(int)tb.min.y),
							Point((int)tb.max.x,(int)tb.max.y),Scalar(255),2);
				}
			}
	}
//	printf("Active blobs: %i %i %i\n",counter, blobs.size(), blob_result.GetNumBlobs());
/*	for(int i=0; i<MAXHANDS; i++){
		printf("%i,", (handids[i]==true)?1:0);
	}
	printf("\n");*/
}