Example #1
void CCalibrateKinect::undistortImages ( const std::vector< cv::Mat >& vImages_,  const cv::Mat_<double>& cvmK_, const cv::Mat_<double>& cvmInvK_, const cv::Mat_<double>& cvmDistCoeffs_, std::vector< cv::Mat >* pvUndistorted_ ) const
{
    std::cout << "undistortImages() "<< std::endl << std::flush;
    CHECK ( !vImages_.empty(),      "undistortImages(): # of input images cannot be zero.\n" );
    CHECK ( !cvmK_.empty(),         "undistortImages(): K matrix cannot be empty.\n" );
    CHECK ( !cvmInvK_.empty(),      "undistortImages(): inverse of K matrix cannot be empty.\n" );
    CHECK ( !cvmDistCoeffs_.empty(), "undistortImages(): distortion coefficients cannot be empty.\n" );

    cv::Size cvFrameSize = vImages_[0].size(); //x,y;
    pvUndistorted_->clear();

    for ( unsigned int n = 0; n < vImages_.size(); n++ )
    {
        cv::Mat cvUndistorted;
        std::cout << "distort: "<< n << "-th image.\n"<< std::flush;
        undistortImage ( vImages_[n],  cvmK_, cvmInvK_, cvmDistCoeffs_, &cvUndistorted );
        pvUndistorted_->push_back ( cvUndistorted );

        //string strNum = boost::lexical_cast< std::string> ( n );
        //string strRGBUndistortedFileName = "rgbUndistorted" + strNum + ".bmp";
        //cv::imwrite ( strRGBUndistortedFileName, cvUndistorted );
    }

    return;
}
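The per-image undistortImage() helper is not shown above. For reference, a minimal stand-alone sketch of the same batch undistortion written directly against cv::undistort might look like this (function name and structure are illustrative, not the author's code):

#include <opencv2/opencv.hpp>
#include <vector>

std::vector<cv::Mat> undistortAll(const std::vector<cv::Mat>& images,
                                  const cv::Mat_<double>& K,
                                  const cv::Mat_<double>& distCoeffs)
{
    std::vector<cv::Mat> out;
    out.reserve(images.size());
    for (const cv::Mat& img : images)
    {
        cv::Mat u;
        cv::undistort(img, u, K, distCoeffs); // remaps each pixel through K and the distortion model
        out.push_back(u);
    }
    return out;
}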
void
ObjTrackerMono::drawCoordinateSystem(cv::Mat &im, const Eigen::Matrix4f &pose, const cv::Mat_<double> &intrinsic, const cv::Mat_<double> &dist_coeffs, double size, int thickness) const
{
    Eigen::Matrix3f R = pose.topLeftCorner<3,3>();
    Eigen::Vector3f t = pose.block<3, 1>(0,3);

    Eigen::Vector3f pt0  = R * Eigen::Vector3f(0,0,0) + t;
    Eigen::Vector3f pt_x = R * Eigen::Vector3f(size,0,0) + t;
    Eigen::Vector3f pt_y = R * Eigen::Vector3f(0,size,0) + t;
    Eigen::Vector3f pt_z = R * Eigen::Vector3f(0,0,size) +t ;

    cv::Point2f im_pt0, im_pt_x, im_pt_y, im_pt_z;

    if (!dist_coeffs.empty())
    {
        v4r::projectPointToImage(&pt0 [0], &intrinsic(0), &dist_coeffs(0), &im_pt0.x );
        v4r::projectPointToImage(&pt_x[0], &intrinsic(0), &dist_coeffs(0), &im_pt_x.x);
        v4r::projectPointToImage(&pt_y[0], &intrinsic(0), &dist_coeffs(0), &im_pt_y.x);
        v4r::projectPointToImage(&pt_z[0], &intrinsic(0), &dist_coeffs(0), &im_pt_z.x);
    }
    else
    {
        v4r::projectPointToImage(&pt0 [0], &intrinsic(0), &im_pt0.x );
        v4r::projectPointToImage(&pt_x[0], &intrinsic(0), &im_pt_x.x);
        v4r::projectPointToImage(&pt_y[0], &intrinsic(0), &im_pt_y.x);
        v4r::projectPointToImage(&pt_z[0], &intrinsic(0), &im_pt_z.x);
    }

    cv::line(im, im_pt0, im_pt_x, CV_RGB(255,0,0), thickness);
    cv::line(im, im_pt0, im_pt_y, CV_RGB(0,255,0), thickness);
    cv::line(im, im_pt0, im_pt_z, CV_RGB(0,0,255), thickness);
}
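The v4r::projectPointToImage calls above take raw pointers into the intrinsic and distortion matrices. A rough equivalent using plain OpenCV's cv::projectPoints is sketched below; rvec/tvec, the helper name and the default values are assumptions, not part of the original code:

#include <opencv2/opencv.hpp>
#include <vector>

void drawAxes(cv::Mat& im, const cv::Mat& rvec, const cv::Mat& tvec,
              const cv::Mat& K, const cv::Mat& distCoeffs,
              double size = 0.1, int thickness = 2)
{
    // origin and the three axis endpoints in the object frame
    std::vector<cv::Point3f> axes = {
        {0, 0, 0}, {(float)size, 0, 0}, {0, (float)size, 0}, {0, 0, (float)size}};
    std::vector<cv::Point2f> img_pts;
    // an empty distCoeffs is interpreted as zero distortion
    cv::projectPoints(axes, rvec, tvec, K, distCoeffs, img_pts);
    cv::line(im, img_pts[0], img_pts[1], CV_RGB(255, 0, 0), thickness); // x axis in red
    cv::line(im, img_pts[0], img_pts[2], CV_RGB(0, 255, 0), thickness); // y axis in green
    cv::line(im, img_pts[0], img_pts[3], CV_RGB(0, 0, 255), thickness); // z axis in blue
}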
void FaceAnalyser::UpdateRunningMedian(cv::Mat_<unsigned int>& histogram, int& hist_count, cv::Mat_<double>& median, const cv::Mat_<double>& descriptor, bool update, int num_bins, double min_val, double max_val)
{

	double length = max_val - min_val;
	if(length < 0)
		length = -length;

	// The median update
	if(histogram.empty())
	{
		histogram = Mat_<unsigned int>(descriptor.cols, num_bins, (unsigned int)0);
		median = descriptor.clone();
	}

	if(update)
	{
		// Find the bins corresponding to the current descriptor
		Mat_<double> converted_descriptor = (descriptor - min_val)*((double)num_bins)/(length);

		// Capping the top and bottom values
		converted_descriptor.setTo(Scalar(num_bins-1), converted_descriptor > num_bins - 1);
		converted_descriptor.setTo(Scalar(0), converted_descriptor < 0);

		// Only count towards the median until a certain number of frames has been seen?
		for(int i = 0; i < histogram.rows; ++i)
		{
			int index = (int)converted_descriptor.at<double>(i);
			histogram.at<unsigned int>(i, index)++;
		}

		// Update the histogram count
		hist_count++;
	}

	if(hist_count == 1)
	{
		median = descriptor.clone();
	}
	else
	{
		// Recompute the median
		int cutoff_point = (hist_count + 1)/2;

		// For each dimension
		for(int i = 0; i < histogram.rows; ++i)
		{
			int cummulative_sum = 0;
			for(int j = 0; j < histogram.cols; ++j)
			{
				cummulative_sum += histogram.at<unsigned int>(i, j);
				if(cummulative_sum > cutoff_point)
				{
					median.at<double>(i) = min_val + j * (length/num_bins) + (0.5*(length)/num_bins);
					break;
				}
			}
		}
	}
}
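The function above maintains a per-dimension histogram so the median can be updated in O(num_bins) work per frame instead of storing all past descriptors. A stand-alone sketch of the same idea for a single scalar stream (class and member names are illustrative): values are binned into num_bins equal-width bins over [min_val, max_val], and the median is read off as the centre of the first bin whose cumulative count passes half of the samples.

#include <vector>

class RunningMedian
{
public:
    RunningMedian(int num_bins, double min_val, double max_val)
        : bins_(num_bins, 0), count_(0), min_val_(min_val),
          bin_width_((max_val - min_val) / num_bins) {}

    void add(double v)
    {
        int idx = (int)((v - min_val_) / bin_width_);
        if (idx < 0) idx = 0;
        if (idx >= (int)bins_.size()) idx = (int)bins_.size() - 1;  // cap top and bottom
        ++bins_[idx];
        ++count_;
    }

    double median() const
    {
        int cum = 0;
        for (size_t j = 0; j < bins_.size(); ++j)
        {
            cum += bins_[j];
            if (2 * cum > count_)                          // passed half the samples
                return min_val_ + (j + 0.5) * bin_width_;  // bin centre
        }
        return min_val_;                                   // no samples yet
    }

private:
    std::vector<int> bins_;
    int count_;
    double min_val_, bin_width_;
};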
Example #4
	void AddDescriptor(cv::Mat_<double>& descriptors, cv::Mat_<double> new_descriptor, int curr_frame, int num_frames_to_keep)
	{
		if(descriptors.empty())
		{
			descriptors = Mat_<double>(num_frames_to_keep, new_descriptor.cols, 0.0);
		}

		int row_to_change = curr_frame % num_frames_to_keep;

		new_descriptor.copyTo(descriptors.row(row_to_change));
	}	
Example #5
void FaceAnalyser::UpdatePredictionTrack(cv::Mat_<int>& prediction_corr_histogram, int& prediction_correction_count, vector<double>& correction, const vector<pair<string, double>>& predictions, double ratio, int num_bins, double min_val, double max_val, int min_frames)
{
	double length = max_val - min_val;
	if(length < 0)
		length = -length;

	correction.resize(predictions.size(), 0);

	// The median update
	if(prediction_corr_histogram.empty())
	{
		prediction_corr_histogram = cv::Mat_<int>((int)predictions.size(), num_bins, (int)0);
	}
	
	for(int i = 0; i < prediction_corr_histogram.rows; ++i)
	{
		// Find the bins corresponding to the current descriptor
		int index = (int)((predictions[i].second - min_val)*((double)num_bins)/(length));
		if(index < 0)
		{
			index = 0;
		}
		else if(index > num_bins - 1)
		{
			index = num_bins - 1;
		}
		prediction_corr_histogram.at<int>(i, index)++;
	}

	// Update the histogram count
	prediction_correction_count++;

	if(prediction_correction_count >= min_frames)
	{
		// Recompute the correction
		int cutoff_point = (int)(ratio * prediction_correction_count);

		// For each dimension
		for(int i = 0; i < prediction_corr_histogram.rows; ++i)
		{
			int cummulative_sum = 0;
			for(int j = 0; j < prediction_corr_histogram.cols; ++j)
			{
				cummulative_sum += prediction_corr_histogram.at<int>(i, j);
				if(cummulative_sum > cutoff_point)
				{
					double corr = min_val + j * (length/num_bins);
					correction[i] = corr;
					break;
				}
			}
		}
	}
}
Example #6
inline std::vector<int> zigzag(const cv::Mat_<float>& mat) 
{
    CV_Assert(!mat.empty());
    CV_Assert(mat.rows==mat.cols && mat.rows==nBlockSize);
	int nIdx = 0;
	std::vector<int> zigzag(nBlockSize*nBlockSize);
	
	for (int i = 0; i < nBlockSize * 2; ++i)
		for (int j = (i < nBlockSize) ? 0 : i - nBlockSize + 1; j <= i && j < nBlockSize; ++j)
			zigzag[nIdx++] = mat((i & 1) ? j*(nBlockSize - 1) + i : (i - j)*nBlockSize + j);

	return zigzag;
}
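The index expression above walks the block along anti-diagonals, alternating direction on odd and even diagonals. A quick stand-alone check of that formula for an assumed 4x4 block (nBlockSize = 4) prints the familiar zig-zag source order 0 1 4 8 5 2 3 6 9 12 13 10 7 11 14 15:

#include <cstdio>

int main()
{
    const int n = 4;
    for (int i = 0; i < n * 2; ++i)
        for (int j = (i < n) ? 0 : i - n + 1; j <= i && j < n; ++j)
            std::printf("%d ", (i & 1) ? j * (n - 1) + i : (i - j) * n + j);
    std::printf("\n");
    return 0;
}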
void FaceAnalyser::ExtractMedian(cv::Mat_<unsigned int>& histogram, int hist_count, cv::Mat_<double>& median, int num_bins, double min_val, double max_val)
{

	double length = max_val - min_val;
	if(length < 0)
		length = -length;

	// The median update
	if(histogram.empty())
	{
		return;
	}
	else
	{
		if(median.empty())
		{
			median = Mat_<double>(1, histogram.rows, 0.0);
		}

		// Compute the median
		int cutoff_point = (hist_count + 1)/2;

		// For each dimension
		for(int i = 0; i < histogram.rows; ++i)
		{
			int cummulative_sum = 0;
			for(int j = 0; j < histogram.cols; ++j)
			{
				cummulative_sum += histogram.at<unsigned int>(i, j);
				if(cummulative_sum > cutoff_point)
				{
					median.at<double>(i) = min_val + j * (length/num_bins) + (0.5*(length)/num_bins);
					break;
				}
			}
		}
	}
}
Example #8
cv::Mat tp3::convo(const cv::Mat& oImage, const cv::Mat_<float>& oKernel) {
    CV_Assert(!oImage.empty() && oImage.type()==CV_8UC3 && oImage.isContinuous());
    CV_Assert(!oKernel.empty() && oKernel.cols==oKernel.rows && oKernel.isContinuous());
    CV_Assert(oImage.cols>oKernel.cols && oImage.rows>oKernel.rows);
    cv::Mat oResult(oImage.size(),CV_32FC3);

    for (int row_index = 0; row_index < oImage.rows; ++row_index)
    {
        for (int col_index = 0; col_index < oImage.cols; ++col_index)
        {
            float resultBlue = calculate_convolution(oImage, oKernel, blue, row_index, col_index) / 255;
            float resultGreen = calculate_convolution(oImage, oKernel, green, row_index, col_index) / 255;
            float resultRed = calculate_convolution(oImage, oKernel, red, row_index, col_index) / 255;

            cv::Vec3f result = cv::Vec3f(resultBlue, resultGreen, resultRed);

            oResult.at<cv::Vec3f>(row_index, col_index) = result;
        }
    }

    return oResult;
}
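calculate_convolution and the blue/green/red channel constants are not shown in this example. A plausible stand-in, assuming a per-channel correlation centred on (row_index, col_index) with the border replicated, is sketched below; it is not the original helper:

#include <opencv2/opencv.hpp>
#include <algorithm>

enum Channel { blue = 0, green = 1, red = 2 };

static float calculate_convolution(const cv::Mat& img, const cv::Mat_<float>& kernel,
                                   Channel ch, int row, int col)
{
    const int half = kernel.rows / 2;
    float acc = 0.f;
    for (int kr = 0; kr < kernel.rows; ++kr)
    {
        for (int kc = 0; kc < kernel.cols; ++kc)
        {
            // clamp sample coordinates to stay inside the image (replicated border)
            int r = std::min(std::max(row + kr - half, 0), img.rows - 1);
            int c = std::min(std::max(col + kc - half, 0), img.cols - 1);
            // note: this is correlation; a true convolution would flip the kernel
            acc += kernel(kr, kc) * img.at<cv::Vec3b>(r, c)[ch];
        }
    }
    return acc;
}

For comparison, cv::filter2D applies the same kernel to every channel in a single call (it also computes correlation rather than a flipped convolution).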
Example #9
//===========================================================================
void CCNF_neuron::Response(cv::Mat_<float> &im, cv::Mat_<double> &im_dft, cv::Mat &integral_img, cv::Mat &integral_img_sq, cv::Mat_<float> &resp)
{

	int h = im.rows - weights.rows + 1;
	int w = im.cols - weights.cols + 1;
	
	// the patch area on which we will calculate responses
	cv::Mat_<float> I;

	if(neuron_type == 3)
	{
		// Perform normalisation across the whole patch (ignoring invalid values indicated by <= 0)

		cv::Scalar mean;
		cv::Scalar std;
		
		// ignore missing values
		cv::Mat_<uchar> mask = im > 0;
		cv::meanStdDev(im, mean, std, mask);

		// if all values are the same, don't divide by 0
		if(std[0] != 0)
		{
			I = (im - mean[0]) / std[0];
		}
		else
		{
			I = (im - mean[0]);
		}

		I.setTo(0, mask == 0);
	}
	else
	{
		if(neuron_type == 0)
		{
			I = im;
		}
		else
		{
			printf("ERROR(%s,%d): Unsupported patch type %d!\n", __FILE__,__LINE__,neuron_type);
			abort();
		}
	}
  
	if(resp.empty())
	{		
		resp.create(h, w);
	}

	// The response from neuron before activation
	if(neuron_type == 3)
	{
		// In case of depth we use per area, rather than per patch normalisation
		matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, CV_TM_CCOEFF); // the linear multiplication, efficient calc of response
	}
	else
	{
		matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, CV_TM_CCOEFF_NORMED); // the linear multiplication, efficient calc of response
	}

	cv::MatIterator_<float> p = resp.begin();

	cv::MatIterator_<float> q1 = resp.begin(); // response for each pixel
	cv::MatIterator_<float> q2 = resp.end();

	// the logistic function (sigmoid) applied to the response
	while(q1 != q2)
	{
		*p++ = (2 * alpha) * 1.0 /(1.0 + exp( -(*q1++ * norm_weights + bias )));
	}

}
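The closing loop applies a scaled logistic to each response value. The same activation can also be written with whole-matrix OpenCV operations; a minimal sketch, assuming the same alpha, norm_weights and bias values as the members used above:

#include <opencv2/opencv.hpp>

cv::Mat_<float> activate(const cv::Mat_<float>& resp, float alpha,
                         float norm_weights, float bias)
{
    cv::Mat_<float> e;
    cv::exp(-(resp * norm_weights + bias), e);       // exp(-(w*r + b)) per element
    cv::Mat_<float> out = (2.f * alpha) / (1.f + e); // scaled sigmoid, range (0, 2*alpha)
    return out;
}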
Example #10
cv::Mat_<byte> DepthSegmenter::calcMostCommonDepthMask( const cv::Rect subRect, cv::Mat_<float> mask) const
{
    cv::Mat_<float> subMat = _rngMat;
    if ( subRect.area() > 0)
        subMat = _rngMat(subRect);
    if ( mask.empty())
        mask = cv::Mat_<float>::ones(subMat.size());
    assert( mask.rows == subMat.rows && mask.cols == subMat.cols);

    // Find the most common depth 
    int *bins = (int*)calloc( _depthLevels, sizeof(int));   // Zeroed bin counts
    float *means = (float*)calloc( _depthLevels, sizeof(float));    // Zeroed per-bin running means of depth
    std::vector< std::vector<float> > dvals( _depthLevels);

    const double rngDelta = _maxRng - _minRng;

    int topIdx = 0; // Remember top index (most hits)
    int maxCnt = 0;
    const int rows = subMat.rows;
    const int cols = subMat.cols;
    for ( int i = 0; i < rows; ++i)
    {
        const float *pxRow = subMat.ptr<float>(i);
        const float *maskRow = mask.ptr<float>(i);

        for ( int j = 0; j < cols; ++j)
        {
            if ( maskRow[j])    // Ignore zero values
            {
                const float depth = pxRow[j];
                if ( depth <= 0)
                    continue;

                const int b = binVal( bins, means, depth, rngDelta);
                if ( b < 0) // Out of range so continue
                    continue;

                dvals[b].push_back(depth);  // Store the depth value itself for later std-dev calc
                if ( bins[b] > maxCnt)
                {
                    topIdx = b;
                    maxCnt = bins[b];
                }   // end if
            }   // end if
        }   // end for - cols
    }   // end for - rows

    /*
    for ( int i = 0; i < _depthLevels; ++i)
        cerr << "Bin " << i << ": " << bins[i] << endl;
    cerr << "Top bin = " << topIdx << endl;
    */
    free(bins);

    const float meanDepth = means[topIdx];    // Mean of most common depth values
    free(means);

    // Calculate the std-dev for the most common depth interval
    const std::vector<float>& vds = dvals[topIdx];
    const int sz = (int)vds.size();
    double sumSqDiffs = 0;
    for ( int i = 0; i < sz; ++i)
        sumSqDiffs += pow(vds[i]-meanDepth,2);
    const double stddev = sqrt(sumSqDiffs/sz);
    _lastStdDev = float(stddev);

    // Identify the pixels closest to meanDepth within c*stddev
    const float withinRng = _inlierFactor * _lastStdDev;

    float minDepth = FLT_MAX;
    float maxDepth = 0;
    float totDepth = 0;
    int pxCount = 0;

    cv::Mat_<byte> outMat( rows, cols);
    for ( int i = 0; i < rows; ++i)
    {
        byte* outRow = outMat.ptr<byte>(i);
        const float* inRow = subMat.ptr<float>(i);
        for ( int j = 0; j < cols; ++j)
        {
            const float depth = inRow[j];
            if ( depth > 0 && (fabs(depth - meanDepth) < withinRng))
            {
                outRow[j] = 0xff;
                totDepth += depth;
                pxCount++;
                if ( depth < minDepth)
                    minDepth = depth;
                if ( depth > maxDepth)
                    maxDepth = depth;
            }   // end if
            else
                outRow[j] = 0;
        }   // end for
    }   // end for

    _lastMinDepth = minDepth;
    _lastMaxDepth = maxDepth;
    _lastAvgDepth = totDepth/pxCount;

    //cv::fastFree(dvals);
    return outMat;
}   // end calcMostCommonDepthMask
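binVal() is not part of this example. Assuming it maps a depth value to one of _depthLevels equal-width bins over [_minRng, _maxRng], bumps that bin's count and keeps a per-bin running mean, a stand-in could look roughly like this (free-function form with the members passed explicitly; purely illustrative):

static int binValSketch(int* bins, float* means, float depth,
                        double minRng, double rngDelta, int depthLevels)
{
    const int b = (int)((depth - minRng) / rngDelta * depthLevels);
    if (b < 0 || b >= depthLevels)
        return -1;                                               // out of range, caller skips the pixel
    means[b] = (means[b] * bins[b] + depth) / (bins[b] + 1);     // running mean of the bin's depths
    ++bins[b];
    return b;
}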
Example #11
// Returns the patch expert responses given a grayscale and an optional depth image.
// Additionally returns the transform from the image coordinates to the response coordinates (and vice versa).
// The computation also requires the current landmark locations (around which responses are computed), the PDM corresponding to the desired model, and the parameters describing its instance.
// The size of the area of interest and the desired scale of analysis must also be provided.
void Patch_experts::Response(vector<cv::Mat_<float> >& patch_expert_responses, cv::Matx22f& sim_ref_to_img, cv::Matx22d& sim_img_to_ref, const cv::Mat_<uchar>& grayscale_image, const cv::Mat_<float>& depth_image,
							 const PDM& pdm, const cv::Vec6d& params_global, const cv::Mat_<double>& params_local, int window_size, int scale)
{

	int view_id = GetViewIdx(params_global, scale);		

	int n = pdm.NumberOfPoints();
		
	// Compute the current landmark locations (around which responses will be computed)
	cv::Mat_<double> landmark_locations;

	pdm.CalcShape2D(landmark_locations, params_local, params_global);

	cv::Mat_<double> reference_shape;
		
	// Initialise the reference shape on which we'll be warping
	cv::Vec6d global_ref(patch_scaling[scale], 0, 0, 0, 0, 0);

	// Compute the reference shape
	pdm.CalcShape2D(reference_shape, params_local, global_ref);
		
	// similarity and inverse similarity transform to and from image and reference shape
	cv::Mat_<double> reference_shape_2D = (reference_shape.reshape(1, 2).t());
	cv::Mat_<double> image_shape_2D = landmark_locations.reshape(1, 2).t();

	sim_img_to_ref = AlignShapesWithScale(image_shape_2D, reference_shape_2D);
	cv::Matx22d sim_ref_to_img_d = sim_img_to_ref.inv(cv::DECOMP_LU);

	double a1 = sim_ref_to_img_d(0,0);
	double b1 = -sim_ref_to_img_d(0,1);
		
	sim_ref_to_img(0,0) = (float)sim_ref_to_img_d(0,0);
	sim_ref_to_img(0,1) = (float)sim_ref_to_img_d(0,1);
	sim_ref_to_img(1,0) = (float)sim_ref_to_img_d(1,0);
	sim_ref_to_img(1,1) = (float)sim_ref_to_img_d(1,1);

	// Indicates the legal pixels in a depth image, if available (used for CLM-Z area of interest (window) interpolation)
	cv::Mat_<uchar> mask;
	if(!depth_image.empty())
	{
		mask = depth_image > 0;			
		mask = mask / 255;
	}		
	

	bool use_ccnf = !this->ccnf_expert_intensity.empty();

	// If using CCNF patch experts, we might need to precalculate the Sigmas
	if(use_ccnf)
	{
		vector<cv::Mat_<float> > sigma_components;

		// Retrieve the correct sigma component size
		for( size_t w_size = 0; w_size < this->sigma_components.size(); ++w_size)
		{
			if(!this->sigma_components[w_size].empty())
			{
				if(window_size*window_size == this->sigma_components[w_size][0].rows)
				{
					sigma_components = this->sigma_components[w_size];
				}
			}
		}			

		// Go through all of the landmarks and compute the Sigma for each
		for( int lmark = 0; lmark < n; lmark++)
		{
			// Only for visible landmarks
			if(visibilities[scale][view_id].at<int>(lmark,0))
			{
				// Precompute sigmas if they are not computed yet
				ccnf_expert_intensity[scale][view_id][lmark].ComputeSigmas(sigma_components, window_size);
			}
		}

	}

	// Calculate the patch responses for every landmark; the actual work happens here.
	// The per-landmark loop is parallelised with TBB, which may work well on some machines
	// while potentially having an adverse effect on others.
	tbb::parallel_for(0, (int)n, [&](int i){
	//for(int i = 0; i < n; i++)
	{
			
		if(visibilities[scale][view_id].rows == n)
		{
			if(visibilities[scale][view_id].at<int>(i,0) != 0)
			{

				// Work out how big the area of interest has to be to get a response of window size
				int area_of_interest_width;
				int area_of_interest_height;

				if(use_ccnf)
				{
					area_of_interest_width = window_size + ccnf_expert_intensity[scale][view_id][i].width - 1; 
					area_of_interest_height = window_size + ccnf_expert_intensity[scale][view_id][i].height - 1;				
				}
				else
				{
					area_of_interest_width = window_size + svr_expert_intensity[scale][view_id][i].width - 1; 
					area_of_interest_height = window_size + svr_expert_intensity[scale][view_id][i].height - 1;
				}
			
				// similarity transform mapping reference-frame patch coordinates into the image, centred on the current landmark
				cv::Mat sim = (cv::Mat_<float>(2,3) << a1, -b1, landmark_locations.at<double>(i,0), b1, a1, landmark_locations.at<double>(i+n,0));

				// Extract the region of interest around the current landmark location
				cv::Mat_<float> area_of_interest(area_of_interest_height, area_of_interest_width);

				// Using C style openCV as it does what we need
				CvMat area_of_interest_o = area_of_interest;
				CvMat sim_o = sim;
				IplImage im_o = grayscale_image;			
				cvGetQuadrangleSubPix(&im_o, &area_of_interest_o, &sim_o);
			
				// get the correct size response window			
				patch_expert_responses[i] = cv::Mat_<float>(window_size, window_size);

				// Get intensity response either from the SVR or CCNF patch experts (prefer CCNF)
				if(!ccnf_expert_intensity.empty())
				{				

					ccnf_expert_intensity[scale][view_id][i].Response(area_of_interest, patch_expert_responses[i]);
				}
				else
				{
					svr_expert_intensity[scale][view_id][i].Response(area_of_interest, patch_expert_responses[i]);
				}
			
				// if we have a corresponding depth patch and it is visible		
				if(!svr_expert_depth.empty() && !depth_image.empty() && visibilities[scale][view_id].at<int>(i,0))
				{

					cv::Mat_<float> dProb = patch_expert_responses[i].clone();
					cv::Mat_<float> depthWindow(area_of_interest_height, area_of_interest_width);
			

					CvMat dimg_o = depthWindow;
					cv::Mat maskWindow(area_of_interest_height, area_of_interest_width, CV_32F);
					CvMat mimg_o = maskWindow;

					IplImage d_o = depth_image;
					IplImage m_o = mask;

					cvGetQuadrangleSubPix(&d_o,&dimg_o,&sim_o);
				
					cvGetQuadrangleSubPix(&m_o,&mimg_o,&sim_o);

					depthWindow.setTo(0, maskWindow < 1);

					svr_expert_depth[scale][view_id][i].ResponseDepth(depthWindow, dProb);
							
					// Sum to one
					double sum = cv::sum(patch_expert_responses[i])[0];

					// To avoid division by 0 issues
					if(sum == 0)
					{
						sum = 1;
					}

					patch_expert_responses[i] /= sum;

					// Sum to one
					sum = cv::sum(dProb)[0];
					// To avoid division by 0 issues
					if(sum == 0)
					{
						sum = 1;
					}

					dProb /= sum;

					patch_expert_responses[i] = patch_expert_responses[i] + dProb;

				}
			}
		}
	}
	});

}
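The region-of-interest extraction above relies on the legacy C API (CvMat, IplImage and cvGetQuadrangleSubPix). cvGetQuadrangleSubPix samples dst(x, y) from src at A*(x - cx, y - cy) + b, with (cx, cy) the destination centre, so the same extraction can be written with cv::warpAffine and WARP_INVERSE_MAP once the translation is shifted by -A*c. A sketch under those assumptions (not the library's own code), taking the 2x3 sim matrix built above and a pre-sized destination patch:

#include <opencv2/opencv.hpp>

void extractQuadrangle(const cv::Mat& src, cv::Mat& dst, const cv::Mat_<float>& sim)
{
    // sim is the 2x3 matrix [a -b tx; b a ty] built for the current landmark
    cv::Mat_<float> M = sim.clone();
    const float cx = (dst.cols - 1) * 0.5f, cy = (dst.rows - 1) * 0.5f;
    M(0, 2) -= M(0, 0) * cx + M(0, 1) * cy;   // shift the translation so the
    M(1, 2) -= M(1, 0) * cx + M(1, 1) * cy;   // patch is centred on (tx, ty)
    cv::warpAffine(src, dst, M, dst.size(),
                   cv::INTER_LINEAR | cv::WARP_INVERSE_MAP,
                   cv::BORDER_REPLICATE);
}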
Example #12
void CFFilter::GetCrossUsingSlidingWindow(const cv::Mat_<cv::Vec3b> &img, cv::Mat_<cv::Vec4b> &crMap, int maxL, int tau)
{
	if ((img.data == NULL) || img.empty()) return;

	////CalcTime ct;
	//ct.Start();

	int width, height;
	width = img.cols;
	height = img.rows;

	crMap.create(height, width);
	int iy, ix;
#pragma omp parallel for private(iy) schedule(guided, 1)
	for (iy=0; iy<height; ++iy)
	{
		// to determine right most, cr[2]
		cv::Vec3i sumColor;
		int lp, rp;
		rp = 0;
		for (lp=0; lp<width; ++lp)
		{
			int diff = rp-lp;
			if (diff == 0)
			{
				sumColor = img[iy][rp];
				++rp;
				++diff;
			}

			while (diff <= maxL)
			{
				if (rp >= width) break;

				//if (abs(sumColor[0]/diff-img[iy][rp][0])>tau || abs(sumColor[1]/diff-img[iy][rp][1])>tau
				//	|| abs(sumColor[2]/diff-img[iy][rp][2])>tau) break;
				if (abs(sumColor[0]-img[iy][rp][0]*diff)>tau*diff 
					|| abs(sumColor[1]-img[iy][rp][1]*diff)>tau*diff
					|| abs(sumColor[2]-img[iy][rp][2]*diff)>tau*diff) break;
				sumColor += img[iy][rp];
				++diff;
				++rp;
			}
			sumColor -= img[iy][lp];

			crMap[iy][lp][2] = diff-1;
		}

		// to determine left most, cr[0]
		lp = width-1;
		for (rp=width-1; rp>=0; --rp)
		{
			int diff = rp-lp;
			if (diff == 0)
			{
				sumColor = img[iy][lp];
				--lp;
				++diff;
			}
			while (diff <= maxL)
			{
				if (lp < 0) break;
				//if (abs(sumColor[0]/diff-img[iy][lp][0])>tau || abs(sumColor[1]/diff-img[iy][lp][1])>tau
				//	|| abs(sumColor[2]/diff-img[iy][lp][2])>tau) break;
				if (abs(sumColor[0]-img[iy][lp][0]*diff)>tau*diff 
					|| abs(sumColor[1]-img[iy][lp][1]*diff)>tau*diff
					|| abs(sumColor[2]-img[iy][lp][2]*diff)>tau*diff) break;
				sumColor += img[iy][lp];
				++diff;
				--lp;
			}

			sumColor -= img[iy][rp];

			crMap[iy][rp][0] = diff-1;
		}
	}

	// determine up and down arm length
#pragma omp parallel for private(ix) schedule(guided, 1)
	for (ix=0; ix<width; ++ix)
	{
		// to determine down most, cr[3]
		cv::Vec3i sumColor;
		int up, dp;
		dp = 0;
		for (up=0; up<height; ++up)
		{
			int diff = dp-up;
			if (diff == 0)
			{
				sumColor = img[dp][ix];
				++dp;
				++diff;
			}
			while (diff <= maxL)
			{
				if (dp >= height) break;
				//if (abs(sumColor[0]/diff-img[dp][ix][0])>tau || abs(sumColor[1]/diff-img[dp][ix][1])>tau
				//	|| abs(sumColor[2]/diff-img[dp][ix][2])>tau) break;
				if (abs(sumColor[0]-img[dp][ix][0]*diff)>tau*diff 
					|| abs(sumColor[1]-img[dp][ix][1]*diff)>tau*diff
					|| abs(sumColor[2]-img[dp][ix][2]*diff)>tau*diff) break;
				sumColor += img[dp][ix];
				++diff;
				++dp;
			}

			sumColor -= img[up][ix];

			crMap[up][ix][3] = diff-1;
		}

		// to determine up most, cr[1]
		up = height-1;
		for (dp=height-1; dp>=0; --dp)
		{
			int diff = dp-up;
			if (diff == 0)
			{
				sumColor = img[up][ix];
				--up;
				++diff;
			}

			while (diff <= maxL)
			{
				if (up < 0) break;
				//if (abs(sumColor[0]/diff-img[up][ix][0])>tau || abs(sumColor[1]/diff-img[up][ix][1])>tau
				//	|| abs(sumColor[2]/diff-img[up][ix][2])>tau) break;
				if (abs(sumColor[0]-img[up][ix][0]*diff)>tau*diff
					|| abs(sumColor[1]-img[up][ix][1]*diff)>tau*diff
					|| abs(sumColor[2]-img[up][ix][2]*diff)>tau*diff) break;
				sumColor += img[up][ix];
				++diff;
				--up;
			}

			sumColor -= img[dp][ix];

			crMap[dp][ix][1] = diff-1;
		}
	}
	
	//ct.End("SlWin Construct CrossMap");
}
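Each crMap entry stores the four arm lengths [left, up, right, down] of the pixel's cross. A small usage sketch (helper name hypothetical) showing how the bounding rectangle of a pixel's cross is typically read off such a map:

#include <opencv2/opencv.hpp>

cv::Rect crossBoundingRect(const cv::Mat_<cv::Vec4b>& crMap, int y, int x)
{
    const cv::Vec4b cr = crMap(y, x);      // [left, up, right, down] arm lengths
    return cv::Rect(x - cr[0], y - cr[1],
                    cr[0] + cr[2] + 1,     // width: left arm + right arm + centre pixel
                    cr[1] + cr[3] + 1);    // height: up arm + down arm + centre pixel
}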