Example #1
void *Gear_FaceTrack::loopThread()
{
  _keepLooping = true;

  while (_keepLooping)
  {
    // Wait for the filter to signal that a new frame is available;
    // pthread_cond_wait() must be called with its mutex held.
    pthread_mutex_lock(&_inputMutex);
    pthread_cond_wait(&_inputCond, &_inputMutex);
    pthread_mutex_unlock(&_inputMutex);
    if ( !_keepLooping )
      break;
    
    // Convert the incoming RGBA frame to a grayscale working buffer.
    _pCurrentFrame = (unsigned char*) _grayImage.data();
    rgba2grayscale(_pCurrentFrame, (unsigned char*)_image->data(), _image->size());
    
    computeIntegralImages();
    findFaces();
    
    // Publish the results under the output lock so readers get a consistent copy.
    pthread_mutex_lock(&_outputMutex);
    //    mergeFaces();
    _drawingFoundFaces = _foundFaces;
    pthread_mutex_unlock(&_outputMutex);
  }

  NOTICE("Exiting thread");
//   pthread_mutex_unlock(&_exitMutex);
  pthread_exit(NULL);
  return NULL; // never reached; kept to satisfy the compiler
}
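
Example #1 converts each RGBA frame to grayscale and then calls computeIntegralImages() before searching for faces. The project's implementation of that call is not shown above; the following is a minimal sketch of how such an integral image (summed-area table) is typically built, with all names illustrative:

#include <cstdint>
#include <vector>

// Illustrative sketch only: build a (w+1) x (h+1) summed-area table from an
// 8-bit grayscale buffer; the extra zero row/column removes edge cases later.
// uint32_t accumulators are sufficient for 8-bit images up to ~16 Mpixels.
std::vector<uint32_t> buildIntegralImage(const uint8_t* gray, int w, int h)
{
  std::vector<uint32_t> integral((w + 1) * (h + 1), 0);
  for (int y = 0; y < h; ++y)
    for (int x = 0; x < w; ++x)
      integral[(y + 1) * (w + 1) + (x + 1)] =
          gray[y * w + x]
        + integral[y * (w + 1) + (x + 1)]   // sum of rows above
        + integral[(y + 1) * (w + 1) + x]   // sum of this row to the left
        - integral[y * (w + 1) + x];        // corner counted twice
  return integral;
}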
Example #2
template <typename DataType, unsigned Dimension> void
pcl::IntegralImage2D<DataType, Dimension>::setInput (const DataType * data, unsigned width, unsigned height, unsigned element_stride, unsigned row_stride)
{
  // Grow the integral-image buffers only when the new input is larger than
  // anything seen before; the extra row and column hold the zero border.
  if ((width + 1) * (height + 1) > first_order_integral_image_.size () )
  {
    width_  = width;
    height_ = height;
    first_order_integral_image_.resize ( (width_ + 1) * (height_ + 1) );
    finite_values_integral_image_.resize ( (width_ + 1) * (height_ + 1) );
    if (compute_second_order_integral_images_)
      second_order_integral_image_.resize ( (width_ + 1) * (height_ + 1) );
  }
  computeIntegralImages (data, row_stride, element_stride);
}
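
The (width_ + 1) * (height_ + 1) sizing above leaves a zero border row and column, which is what makes later region queries branch-free. As a hedged illustration (not PCL's actual accessor), the sum over a w x h window anchored at (x, y) can then be read in O(1), using the layout from the sketch under Example #1:

#include <cstdint>
#include <vector>

// Illustrative sketch only: O(1) rectangle sum from an integral image with a
// zero border; W is the width of the source image, so the row stride is W + 1.
uint32_t regionSum(const std::vector<uint32_t>& integral, int W,
                   int x, int y, int w, int h)
{
  const int stride = W + 1;
  return integral[(y + h) * stride + (x + w)]
       - integral[(y + h) * stride + x]
       - integral[y * stride + (x + w)]
       + integral[y * stride + x];
}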
Example #3
		bool TLDDetector::ocl_detect(const Mat& img, const Mat& imgBlurred, Rect2d& res, std::vector<LabeledPatch>& patches, Size initSize)
		{
			patches.clear();
			Mat_<uchar> standardPatch(STANDARD_PATCH_SIZE, STANDARD_PATCH_SIZE);
			Mat tmp;
			int dx = initSize.width / 10, dy = initSize.height / 10;
			Size2d size = img.size();
			double scale = 1.0;
			int npos = 0, nneg = 0;
			double maxSc = -5.0;
			Rect2d maxScRect;
			int scaleID;
			std::vector <Mat> resized_imgs, blurred_imgs;
			std::vector <Point> varBuffer, ensBuffer;
			std::vector <int> varScaleIDs, ensScaleIDs;

			//Detection part
			//Generate windows and filter by variance
			scaleID = 0;
			resized_imgs.push_back(img);
			blurred_imgs.push_back(imgBlurred);
			// Build a scale pyramid: each pass shrinks the image by SCALE_STEP and
			// scans it with a sliding window of the initial bounding-box size.
			do
			{
				Mat_<double> intImgP, intImgP2;
				computeIntegralImages(resized_imgs[scaleID], intImgP, intImgP2);
				for (int i = 0, imax = cvFloor((0.0 + resized_imgs[scaleID].cols - initSize.width) / dx); i < imax; i++)
				{
					for (int j = 0, jmax = cvFloor((0.0 + resized_imgs[scaleID].rows - initSize.height) / dy); j < jmax; j++)
					{
						if (!patchVariance(intImgP, intImgP2, originalVariancePtr, Point(dx * i, dy * j), initSize))
							continue;
						varBuffer.push_back(Point(dx * i, dy * j));
						varScaleIDs.push_back(scaleID);
					}
				}
				scaleID++;
				size.width /= SCALE_STEP;
				size.height /= SCALE_STEP;
				scale *= SCALE_STEP;
				resize(img, tmp, size, 0, 0, DOWNSCALE_MODE);
				resized_imgs.push_back(tmp);
				GaussianBlur(resized_imgs[scaleID], tmp, GaussBlurKernelSize, 0.0f);
				blurred_imgs.push_back(tmp);
			} while (size.width >= initSize.width && size.height >= initSize.height);

			//Ensemble classification
			for (int i = 0; i < (int)varBuffer.size(); i++)
			{
				prepareClassifiers((int)blurred_imgs[varScaleIDs[i]].step[0]);
				if (ensembleClassifierNum(&blurred_imgs[varScaleIDs[i]].at<uchar>(varBuffer[i].y, varBuffer[i].x)) <= ENSEMBLE_THRESHOLD)
					continue;
				ensBuffer.push_back(varBuffer[i]);
				ensScaleIDs.push_back(varScaleIDs[i]);
			}

			//NN classification
			//Prepare batch of patches
			int numOfPatches = (int)ensBuffer.size();
			// One row per candidate; each 15x15 standard patch flattens to 225 bytes.
			Mat_<uchar> stdPatches(numOfPatches, 225);
			// Raw buffers for the batch results; released before every return below.
			double *resultSr = new double[numOfPatches];
			double *resultSc = new double[numOfPatches];

			uchar *patchesData = stdPatches.data;
			for (int i = 0; i < (int)ensBuffer.size(); i++)
			{
				resample(resized_imgs[ensScaleIDs[i]], Rect2d(ensBuffer[i], initSize), standardPatch);
				uchar *stdPatchData = standardPatch.data;
				for (int j = 0; j < 225; j++)
					patchesData[225*i+j] = stdPatchData[j];
			}
			//Calculate Sr and Sc batches
			ocl_batchSrSc(stdPatches, resultSr, resultSc, numOfPatches);


			for (int i = 0; i < (int)ensBuffer.size(); i++)
			{
				LabeledPatch labPatch;
				standardPatch.data = &stdPatches.data[225 * i]; // view row i of the batch, no copy
				double curScale = pow(SCALE_STEP, ensScaleIDs[i]);
				labPatch.rect = Rect2d(ensBuffer[i].x*curScale, ensBuffer[i].y*curScale, initSize.width * curScale, initSize.height * curScale);

				double srValue, scValue;

				srValue = resultSr[i];

				//// To fix: check the paper; this probably causes wrong learning
				//
				labPatch.isObject = srValue > THETA_NN;
				labPatch.shouldBeIntegrated = std::abs(srValue - THETA_NN) < 0.1;
				patches.push_back(labPatch);
				//

				if (!labPatch.isObject)
				{
					nneg++;
					continue;
				}
				else
				{
					npos++;
				}
				scValue = resultSc[i];
				if (scValue > maxSc)
				{
					maxSc = scValue;
					maxScRect = labPatch.rect;
				}
			}

			// Release the heap-allocated batch buffers on both exit paths.
			delete[] resultSr;
			delete[] resultSc;

			if (maxSc < 0)
				return false;
			res = maxScRect;
			return true;
		}
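
patchVariance() in Example #3 is not shown, but the variance filter it implements is the standard use of the paired integral images intImgP and intImgP2: with S1 the window's pixel sum and S2 its squared-pixel sum, Var = S2/N - (S1/N)^2, and the detector drops windows whose variance falls below a threshold derived from originalVariancePtr. A hedged sketch, assuming cv::integral-style (rows+1) x (cols+1) matrices; windowVariance and its parameters are illustrative names, not OpenCV's API:

#include <opencv2/core.hpp>

// Illustrative sketch only (not OpenCV's actual patchVariance): window
// variance from first- and second-order integral images in O(1).
static double windowVariance(const cv::Mat_<double>& intImg,   // sums of pixels
                             const cv::Mat_<double>& intImg2,  // sums of squares
                             cv::Point p, cv::Size win)
{
  auto rectSum = [](const cv::Mat_<double>& ii, cv::Point q, cv::Size s) {
    return ii(q.y + s.height, q.x + s.width) - ii(q.y + s.height, q.x)
         - ii(q.y, q.x + s.width) + ii(q.y, q.x);
  };
  const double n    = static_cast<double>(win.area());
  const double mean = rectSum(intImg, p, win) / n;
  return rectSum(intImg2, p, win) / n - mean * mean; // E[x^2] - (E[x])^2
}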