Example #1
void imProcessThreshold(const imImage* src_image, imImage* dst_image, float level, int value)
{
  switch(src_image->data_type)
  {
  case IM_BYTE:
    doThreshold((imbyte*)src_image->data[0], (imbyte*)dst_image->data[0],
                src_image->count, (imbyte)level, value);
    break;
  case IM_SHORT:
    doThreshold((short*)src_image->data[0], (imbyte*)dst_image->data[0],
                src_image->count, (short)level, value);
    break;
  case IM_USHORT:
    doThreshold((imushort*)src_image->data[0], (imbyte*)dst_image->data[0],
                src_image->count, (imushort)level, value);
    break;
  case IM_INT:
    doThreshold((int*)src_image->data[0], (imbyte*)dst_image->data[0],
                src_image->count, (int)level, value);
    break;
  case IM_FLOAT:
    doThreshold((float*)src_image->data[0], (imbyte*)dst_image->data[0],
                src_image->count, (float)level, value);
    break;
  }
}
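The templated doThreshold helper that imProcessThreshold dispatches to is not shown on this page. A minimal sketch consistent with the calls above could look like the following; the comparison direction and the 0/value mapping are assumptions, not the library's actual implementation.

template <typename T>
static void doThreshold(T* src_data, imbyte* dst_data, int count, T level, int value)
{
  // map every pixel to a binary result: 'value' above the level, 0 otherwise (assumed convention)
  for (int i = 0; i < count; i++)
    dst_data[i] = (imbyte)(src_data[i] > level ? value : 0);
}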
Example #2
void Kinect::doVision() {
	// get depth pixels
	int numPx = kinect.getWidth()*kinect.getHeight();
	memcpy(depths, kinect.getDepthPixels(), numPx);
	
	doThreshold();
	
	// load into a cvImage
	thresholded.setFromPixels(thresholdedPixels, kinect.getWidth(), kinect.getHeight());
	
	// flip as required
	if(flipX || flipY) thresholded.mirror(flipY, flipX);
	
	// find contours
	contourFinder.findContours( thresholded,
							   100, getWidth()*getHeight()/4,
							   5, false);
	

	vector<ofVec3f> blobs;
	for(int i = 0; i < contourFinder.nBlobs; i++) {
		blobs.push_back(
				ofVec3f(find3DBlobCentre(contourFinder.blobs[i]))
		);
		
	}
	
	// this is when the notifications will fire.
	blobTracker.track(blobs);
}
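doThreshold() here is a member of this Kinect wrapper class and is not shown on this page. A hypothetical sketch that fills thresholdedPixels from the copied depth map could look like this; nearThreshold, farThreshold and the byte-per-pixel member types are assumptions, not taken from the original class.

void Kinect::doThreshold() {
	int numPx = kinect.getWidth()*kinect.getHeight();
	for(int i = 0; i < numPx; i++) {
		// keep only depth values inside the band of interest (assumed near/far members)
		thresholdedPixels[i] = (depths[i] > nearThreshold && depths[i] < farThreshold) ? 255 : 0;
	}
}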
Example #3
int ARTracker::processFrame(cv::Mat& frame){
    std::vector<std::vector<cv::Point> > contours;
    std::vector<ARMarker> potentialMarkers;
    
    // flush out previously detected markers
    _detectedMarkers.clear();
    
    doGreyScale(_greyMat, frame);
    
    if( isBlurring() ){
        doBlur(_greyMat, _greyMat);
    }
    
    doThreshold(_binaryMat, _greyMat);
    
    doFindContours(contours, _binaryMat);
    
    doFindPotentialMarkers(potentialMarkers, contours, _binaryMat);
    
    doFindMarkers(_detectedMarkers, potentialMarkers, _binaryMat);
    
    doSubpixelAccuracy(_detectedMarkers, _greyMat);
    
    doEstimatePose(_detectedMarkers);
    
    processDebugRequest(frame, _greyMat, _binaryMat, contours, potentialMarkers, _detectedMarkers);
    
    return (int)_detectedMarkers.size();
    
}
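doThreshold() is one of several ARTracker helpers this page does not show. A plausible sketch matching the dst-first call convention above is given below; the choice of adaptive thresholding and its block size/offset parameters are illustrative assumptions.

void ARTracker::doThreshold(cv::Mat& binaryMat, const cv::Mat& greyMat){
    // adaptive thresholding keeps marker borders intact under uneven lighting
    cv::adaptiveThreshold(greyMat, binaryMat, 255,
                          cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY_INV, 7, 7);
}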
Example #4
Detector::Status Detector::loadSpectacles(const string path)
{
    spectaclesSrc = path;
    Status status = doLoadImage(spectaclesSrc, spectacles);
    if (status!=StatusOK) return status;
    //crop the specs
    doThreshold(spectacles, specMask, 150, 255, CV_THRESH_BINARY_INV );
    if (debug) {
        namedWindow("Threshold");
        imshow("Threshold",specMask);
    }
    int lowRow = -1, highRow = specMask.rows;
    while(sum(specMask.row(++lowRow))==Scalar::all(0));
    while(sum(specMask.row(--highRow))==Scalar::all(0));
    // rowRange/colRange treat the upper bound as exclusive, so add 1 to keep the last non-empty row/column
    spectacles = spectacles.rowRange(lowRow, highRow + 1);
    specMask = specMask.rowRange(lowRow, highRow + 1);
    int lowCol = -1, highCol = specMask.cols;
    while(sum(specMask.col(++lowCol))==Scalar::all(0));
    while(sum(specMask.col(--highCol))==Scalar::all(0));
    spectacles = spectacles.colRange(lowCol, highCol + 1);
    if (debug) {
        namedWindow("Spectacles");
        imshow("Spectacles", spectacles);
    }
    return StatusOK;
}
Example #5
Detector::Status Detector::detectAndDraw()
{
    const Scalar eye_color(0, 255, 255);
    const Scalar face_color(255, 128, 0);
    vector<Rect> objects;
    vector<Rect> faces;

    //check the data
    if( eyesCascade.empty() || faceCascade.empty() )
    {
        return StatusNoModel;
    }
    image = imread(imageSrc.c_str() );
    if (!image.data) {
        return StatusNoImage;
    }
    if (!spectacles.data) {
        return StatusNoSpectacles;
    }
    doThreshold(spectacles, specMask, 150, 255, CV_THRESH_BINARY_INV );
    if (debug) {
        namedWindow("Threshold");
        imshow("Threshold",specMask);
    }

    //convert the image to grayscale
    Mat gray(image.rows, image.cols, CV_8UC1);
    cvtColor(image, gray, CV_BGR2GRAY);
    equalizeHist(gray, gray);

    // detect faces and eyes
    faceCascade.detectMultiScale(gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE);
    if (debug) doMarkObjects(faces, face_color);

    eyesCascade.detectMultiScale( gray, objects, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE );
    if (debug) doMarkObjects(objects, eye_color);

    //sort the results from left-to-right
    std::sort(faces.begin(), faces.end(), by_x());
    std::sort(objects.begin(), objects.end(), by_x());

    //iterate over all found faces
    pred_within_rect pwr;
    for (vector<Rect>::iterator face = faces.begin(); face != faces.end(); ++face)
    {
        //process the detected face: if there are eyes found within it, then put spectacles on it
        pwr.r = (*face);
        vector<Rect>::iterator eye = std::find_if(objects.begin(), objects.end(), pwr);
        if (eye!=objects.end())
        {
            doPutSpectaclesOnFace(*face, eye);
        }
    }
    imshow(title, image);
    return StatusOK;
}
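The by_x comparator and the pred_within_rect predicate used by std::sort and std::find_if above are not shown on this page. Definitions consistent with those calls could look like the following; they are assumptions, not the original code.

struct by_x {
    // order rectangles left-to-right by their x coordinate
    bool operator()(const Rect& a, const Rect& b) const { return a.x < b.x; }
};

struct pred_within_rect {
    Rect r;
    // true when the candidate eye rectangle lies entirely inside the face rectangle r
    bool operator()(const Rect& eye) const { return (r & eye) == eye; }
};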
Example #6
void Detector::doPutSpectaclesOnFace(const Rect face, vector<Rect>::const_iterator eye )
{
    //assumption: spectacles should be larger than detected eyes, and approximately as wide as a face
    int left = face.x;
    int right = left+face.width;
    int scaledHeight = spectacles.rows*(right-left)*1./spectacles.cols;
    int top = (*eye).y +((*eye).height/2) - scaledHeight/2;
    if (top<0) top=1;
    int bottom = top+scaledHeight;

    //resize spectacles and the copy mask
    Mat scaledSpecs(bottom-top, right-left, CV_8UC1);
    Mat scaledSpecsMask(scaledSpecs.size(), CV_8UC1);
    resize(spectacles, scaledSpecs,scaledSpecs.size() );
    doThreshold(scaledSpecs, scaledSpecsMask, 160, 255, CV_THRESH_BINARY_INV);
    //resize(specMask,scaledSpecsMask, scaledSpecs.size());

    //put on target
    const Rect dstRect(left, top, right-left, bottom-top);
    Mat target(image,dstRect );
    // on masked pixels, (dst & specs) | specs == specs, so the two calls together copy
    // the scaled spectacles onto the face region only where the mask is set
    bitwise_and(image(dstRect), scaledSpecs, target, scaledSpecsMask);
    bitwise_or(image(dstRect), scaledSpecs, target, scaledSpecsMask);
}
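Because the masked bitwise_and/bitwise_or pair simply writes the spectacle pixels into the face region, a single masked copy gives the same composite, assuming scaledSpecs has the same type as the image:

// equivalent masked composite (sketch)
scaledSpecs.copyTo(image(dstRect), scaledSpecsMask);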