Example #1
bool CMainWebCam::DetectMarker(void)
{
	ARUint8*		image;

	pattFound = false;	
	image = ar2VideoGetImage(ARTVideo);
	if(!image) return(false);


	ARMarkerInfo*	markerInfo;
	int				markerNum, objectNum;
	int				i, j, k;
	CMarkerObject*	object;


	ARTImage = image;

	callCountMarkerDetect++;

	// Run ARToolKit marker detection on the current frame.
	if(arDetectMarker(ARTImage, ARTThreshhold, &markerInfo, &markerNum) < 0) {
		exit(-1);
	}

	objectNum = markerObjectList.GetDegree();
	markerObjectList.GoToFirst();
	for(i = 0; i < objectNum; i++) {
		object = (CMarkerObject*) markerObjectList.GetKey();

		if(!object->fix) {
			// Find the detected marker matching this object's ID with the highest confidence.
			k = -1;
			for(j = 0; j < markerNum; j++) {
				if(object->id == markerInfo[j].id) {
					if(k == -1) k = j;
					else if(markerInfo[k].cf < markerInfo[j].cf) k = j;
				}
			}
			if(k == -1) {
				object->visible = false;
			} else {
				pattFound = true;
				object->GetTransMat(&markerInfo[k]);
				object->visible = true;
			}
		} else {
			// Fixed objects are always treated as found.
			pattFound = true;
		}

		markerObjectList.GoToNext();
	}

	return(true);
}
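The loop above is the common ARToolKit selection idiom: for each registered object, scan the detected markers and keep the one with the matching ID and the highest confidence value (cf). A minimal standalone sketch of that step follows; the helper name pickBestMarker is an assumption for illustration, while ARMarkerInfo and its id/cf fields are the real ARToolKit types used in the example.
// Hedged sketch: return the index of the highest-confidence detection whose
// id matches patternId, or -1 if the pattern was not detected at all.
// The helper name is illustrative; ARMarkerInfo, id and cf come from ARToolKit.
static int pickBestMarker(const ARMarkerInfo *markerInfo, int markerNum, int patternId)
{
    int best = -1;
    for (int j = 0; j < markerNum; j++) {
        if (markerInfo[j].id != patternId) continue;
        if (best == -1 || markerInfo[best].cf < markerInfo[j].cf) best = j;
    }
    return best;
}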
Example #2
bool ARToolKitVideoSource::captureFrame() {

	if (deviceState == DEVICE_RUNNING) {

        AR2VideoBufferT *vbuff = ar2VideoGetImage(gVid);
        if (vbuff && vbuff->buff) {
			frameStamp++;
            frameBuffer = vbuff->buff;
            // A second buffer plane is present only for biplanar pixel formats (e.g. separate luma/chroma planes).
            frameBuffer2 = (vbuff->bufPlaneCount == 2 ? vbuff->bufPlanes[1] : NULL);
            return true;
		}
	}

	return false;
}
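captureFrame() only latches pointers into the video module's most recent buffer; a caller is expected to poll it once per render pass and hand the plane pointers on to the tracker or renderer. A minimal, hypothetical caller is sketched below; only captureFrame(), frameBuffer and frameBuffer2 come from the example, the other names are assumptions.
// Hedged sketch of a per-pass caller. updateAndTrack() and trackFrame() are
// hypothetical names, assumed to live in the same class so they can reach the
// frameBuffer members; captureFrame() is the function shown above.
void ARToolKitVideoSource::updateAndTrack()
{
    if (!captureFrame()) return;   // no new frame available this pass
    // frameBuffer points at plane 0 of the newest frame; frameBuffer2 is
    // non-NULL only for biplanar pixel formats.
    trackFrame(frameBuffer, frameBuffer2);
}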
Example #3
static void mainLoop( void )
{
    AR2VideoBufferT *videoBuffL;
    AR2VideoBufferT *videoBuffR;
    ARUint8         *dataPtrL;
    ARUint8         *dataPtrR;
    int              cornerFlagL;
    int              cornerFlagR;
    int              cornerCountL;
    int              cornerCountR;
    char             buf[256];
    int              i;


    if ((videoBuffL = ar2VideoGetImage(vidL))) {
        gVideoBuffL = videoBuffL;
    }
    if ((videoBuffR = ar2VideoGetImage(vidR))) {
        gVideoBuffR = videoBuffR;
    }

    if (gVideoBuffL && gVideoBuffR) {

        // Warn about significant time differences.
        i = ((int)gVideoBuffR->time_sec -  (int)gVideoBuffL->time_sec) * 1000
            + ((int)gVideoBuffR->time_usec - (int)gVideoBuffL->time_usec) / 1000;
        if( i > 20 || i < -20 ) {
            ARLOG("Time diff = %d[msec]\n", i);
        }

        dataPtrL = gVideoBuffL->buff;
        dataPtrR = gVideoBuffR->buff;
        glClear(GL_COLOR_BUFFER_BIT);
        argDrawMode2D( vpL );
        argDrawImage( dataPtrL );
        argDrawMode2D( vpR );
        argDrawImage( dataPtrR );

        copyImage( dataPtrL, (ARUint8 *)calibImageL->imageData, xsizeL*ysizeL, pixFormatL );
        cornerFlagL = cvFindChessboardCorners(calibImageL, cvSize(chessboardCornerNumY,chessboardCornerNumX),
                                              cornersL, &cornerCountL, CV_CALIB_CB_ADAPTIVE_THRESH|CV_CALIB_CB_FILTER_QUADS );

        copyImage( dataPtrR, (ARUint8 *)calibImageR->imageData, xsizeR*ysizeR, pixFormatR );
        cornerFlagR = cvFindChessboardCorners(calibImageR, cvSize(chessboardCornerNumY,chessboardCornerNumX),
                                              cornersR, &cornerCountR, CV_CALIB_CB_ADAPTIVE_THRESH|CV_CALIB_CB_FILTER_QUADS );

        argDrawMode2D( vpL );
        if(cornerFlagL) glColor3f(1.0f, 0.0f, 0.0f);
        else            glColor3f(0.0f, 1.0f, 0.0f);
        glLineWidth(2.0f);
        //ARLOG("Detected corners = %d\n", cornerCount);
        for( i = 0; i < cornerCountL; i++ ) {
            argDrawLineByObservedPos(cornersL[i].x-5, cornersL[i].y-5, cornersL[i].x+5, cornersL[i].y+5);
            argDrawLineByObservedPos(cornersL[i].x-5, cornersL[i].y+5, cornersL[i].x+5, cornersL[i].y-5);
            //ARLOG("  %f, %f\n", cornersL[i].x, cornersL[i].y);
            sprintf(buf, "%d\n", i);
            argDrawStringsByObservedPos(buf, cornersL[i].x, cornersL[i].y+20);
        }

        argDrawMode2D( vpR );
        if(cornerFlagR) glColor3f(1.0f, 0.0f, 0.0f);
        else            glColor3f(0.0f, 1.0f, 0.0f);
        glLineWidth(2.0f);
        //ARLOG("Detected corners = %d\n", cornerCount);
        for( i = 0; i < cornerCountR; i++ ) {
            argDrawLineByObservedPos(cornersR[i].x-5, cornersR[i].y-5, cornersR[i].x+5, cornersR[i].y+5);
            argDrawLineByObservedPos(cornersR[i].x-5, cornersR[i].y+5, cornersR[i].x+5, cornersR[i].y-5);
            //ARLOG("  %f, %f\n", cornersR[i].x, cornersR[i].y);
            sprintf(buf, "%d\n", i);
            argDrawStringsByObservedPos(buf, cornersR[i].x, cornersR[i].y+20);
        }

        if( cornerFlagL && cornerFlagR ) {
            cornerFlag = 1;
            glColor3f(1.0f, 0.0f, 0.0f);
        }
        else {
            cornerFlag = 0;
            glColor3f(0.0f, 1.0f, 0.0f);
        }
        argDrawMode2D( vpL );
        sprintf(buf, "Captured Image: %2d/%2d\n", capturedImageNum, calibImageNum);
        argDrawStringsByIdealPos(buf, 10, 30);

        argSwapBuffers();

        gVideoBuffL = gVideoBuffR = NULL;

    } else arUtilSleep(2);
}
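The synchronisation check at the top of mainLoop() reduces the two capture timestamps to a signed millisecond difference and warns when the stereo pair is more than 20 ms apart. The same arithmetic, pulled into a small helper for clarity, might look like the sketch below; the helper name is an assumption, the time_sec/time_usec fields are the ones used in the example.
// Hedged sketch: signed difference (right minus left) in milliseconds between
// two captured buffers. The function name is illustrative only.
static int stereoTimeDiffMs(const AR2VideoBufferT *buffL, const AR2VideoBufferT *buffR)
{
    return ((int)buffR->time_sec  - (int)buffL->time_sec)  * 1000
         + ((int)buffR->time_usec - (int)buffL->time_usec) / 1000;
}
// In the example, a "Time diff" warning is logged whenever the absolute value
// of this difference exceeds 20 ms.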
Example #4
static void mainLoop(void)
{
	static int ms_prev;
	int ms;
	float s_elapsed;
	ARUint8 *image;
    AR2VideoBufferT *movieBuffer;
	ARdouble err;

    int             j, k;
	
	// Find out how long since mainLoop() last ran.
	ms = glutGet(GLUT_ELAPSED_TIME);
	s_elapsed = (float)(ms - ms_prev) * 0.001f;
	if (s_elapsed < 0.01f) return; // Don't update more often than 100 Hz.
	ms_prev = ms;
	
	// Grab a movie frame (if available).
    if ((movieBuffer = ar2VideoGetImage(gMovieVideo)) != NULL) {
        if (movieBuffer->buff && movieBuffer->fillFlag)
            gMovieImage = movieBuffer->buff;
    }
    
	// Grab a video frame.
	if ((image = arVideoGetImage()) != NULL) {
		gARTImage = image;	// Save the fetched image.
        
		gCallCountMarkerDetect++; // Increment ARToolKit FPS counter.
		
		// Detect the markers in the video frame.
		if (arDetectMarker(gARHandle, gARTImage) < 0) {
			exit(-1);
		}
		
		// Check through the marker_info array for highest confidence
		// visible marker matching our preferred pattern.
		k = -1;
		for (j = 0; j < gARHandle->marker_num; j++) {
			if (gARHandle->markerInfo[j].id == gPatt_id) {
				if (k == -1) k = j; // First marker detected.
				else if (gARHandle->markerInfo[j].cf > gARHandle->markerInfo[k].cf) k = j; // Higher confidence marker detected.
			}
		}
		
		if (k != -1) {
			// Get the transformation between the marker and the real camera into gPatt_trans.
            if (gPatt_found && useContPoseEstimation) {
                err = arGetTransMatSquareCont(gAR3DHandle, &(gARHandle->markerInfo[k]), gPatt_trans, gPatt_width, gPatt_trans);
            } else {
                err = arGetTransMatSquare(gAR3DHandle, &(gARHandle->markerInfo[k]), gPatt_width, gPatt_trans);
                if (!gPatt_found) {
                    // Marker has appeared, so un-pause movie.
                    ar2VideoCapStart(gMovieVideo);
                }
            }
			gPatt_found = TRUE;
		} else {
            if (gPatt_found) {
                // Marker has disappeared, so pause movie.
                ar2VideoCapStop(gMovieVideo);
            }
			gPatt_found = FALSE;
		}
		
		// Tell GLUT the display has changed.
		glutPostRedisplay();
	}
}
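The pose-estimation branch above shows the usual ARToolKit pattern: arGetTransMatSquareCont() refines the previous frame's pose while the marker stays visible, and arGetTransMatSquare() computes it from scratch when the marker (re)appears. Condensed into one hedged helper that reuses the example's globals, the decision might read as follows; the wrapper name updatePose is an assumption.
// Hedged sketch of the pose-update decision, reusing the example's globals
// (gAR3DHandle, gPatt_trans, gPatt_width, gPatt_found, useContPoseEstimation).
// The wrapper name is illustrative only; it returns the estimation error.
static ARdouble updatePose(ARMarkerInfo *info)
{
    if (gPatt_found && useContPoseEstimation) {
        // Marker was visible last frame: refine the previous transform.
        return arGetTransMatSquareCont(gAR3DHandle, info, gPatt_trans, gPatt_width, gPatt_trans);
    }
    // Marker just (re)appeared, or continuous estimation is off: start fresh.
    return arGetTransMatSquare(gAR3DHandle, info, gPatt_width, gPatt_trans);
}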
Example #5
ARUint8 *arVideoGetImage( void )
{
    if( vid == NULL ) return NULL;

    return ar2VideoGetImage( vid );
}
/* CHECKED TODO
*  PsychARGetTextureFromCapture() -- Create an OpenGL texturemap from a specific videoframe from given capture object.
*
*  win = Window pointer of onscreen window for which a OpenGL texture should be created.
*  capturehandle = Handle to the capture object.
*  checkForImage = >0 == Just check if new image available, 0 == really retrieve the image, blocking if necessary.
*                   2 == Check for new image, block inside this function (if possible) if no image available.
*
*  timeindex = This parameter is currently ignored and reserved for future use.
*  out_texture = Pointer to the Psychtoolbox texture-record where the new texture should be stored.
*  presentation_timestamp = A ptr to a double variable, where the presentation timestamp of the returned frame should be stored.
*  summed_intensity = An optional ptr to a double variable. If non-NULL, then sum of intensities over all channels is calculated and returned.
*  outrawbuffer = An optional ptr to a memory buffer of sufficient size. If non-NULL, the buffer will be filled with the captured raw image data, e.g., for use inside Matlab or whatever...
*  Returns Number of pending or dropped frames after fetch on success (>=0), -1 if no new image available yet, -2 if no new image available and there won't be any in future.
*/
int PsychARGetTextureFromCapture(PsychWindowRecordType *win, int capturehandle, int checkForImage, double timeindex,
								 PsychWindowRecordType *out_texture, double *presentation_timestamp, double* summed_intensity, rawcapimgdata* outrawbuffer)
{
    GLuint texid;
    int w, h;
    double targetdelta, realdelta, frames;
    unsigned int intensity = 0;
    unsigned int count, i, bpp;
    unsigned char* pixptr;
    psych_bool newframe = FALSE;
    double tstart, tend;
    unsigned int pixval, alphacount;
    int error;
    int nrdropped = 0;
    unsigned char* input_image = NULL;
	
    // Retrieve device record for handle:
    PsychVidcapRecordType* capdev = PsychGetARVidcapRecord(capturehandle);
	
	// Compute width and height for later creation of textures etc. Need to do this here,
	// so we can return the values for raw data retrieval:
	w=capdev->width;
    h=capdev->height;

	// Size of a single pixel in bytes:
	bpp = capdev->reqpixeldepth;
	
	// If a outrawbuffer struct is provided, we fill it with info needed to allocate a
	// sufficient memory buffer for returned raw image data later on:
	if (outrawbuffer) {
		outrawbuffer->w = w;
		outrawbuffer->h = h;
		outrawbuffer->depth = bpp;
	}
	
    // int waitforframe = (checkForImage > 1) ? 1:0; // Blocking wait for new image requested?
	
	// A checkForImage 4 means "no op" with the ARVideo capture engine: This is meant to drive
	// a movie recording engine, ie., grant processing time to it. Our ARVideo engine doesn't
	// support movie recording, so this is a no-op:
	if (checkForImage == 4) return(0);

    // Take start timestamp for timing stats:
    PsychGetAdjustedPrecisionTimerSeconds(&tstart);
	
    // Should we just check for new image?
    if (checkForImage) {
		// Reset current dropped count to zero:
		capdev->current_dropped = 0;
        
		if (capdev->grabber_active == 0) {
			// Grabber stopped. We'll never get a new image:
			return(-2);
		}
		
		// Check for image in polling mode: We capture in non-blocking mode:			
		capdev->frame = ar2VideoGetImage(capdev->camera);

		// Ok, call succeeded. If the 'frame' pointer is non-NULL then there's a new frame ready and dequeued from DMA
		// ringbuffer. We'll return it on next non-poll invocation. Otherwise no new video data ready yet:
		capdev->frame_ready = (capdev->frame != NULL) ? 1 : 0;

		
		if (capdev->frame_ready) {
			// Store count of currently queued frames (in addition to the one just fetched).
			// This is an indication of how well the users script is keeping up with the video stream,
			// technically the number of frames that would need to be dropped to keep in sync with the
			// stream.
			// TODO: Think about this. ARVideo doesn't support a query for pending/dropped frames, so
			// we either need to live without this feature or think up something clever...
			capdev->current_dropped = (int) 0;
			
			// Ok, at least one new frame ready. If more than one frame has queued up and
			// we are in 'dropframes' mode, ie. we should always deliver the most recent available
			// frame, then we quickly fetch & discard all queued frames except the last one.
			while((capdev->dropframes) && ((int) capdev->current_dropped > 0)) {
				// We just poll - fetch the frames. As we know there are some queued frames, it
				// doesn't matter if we poll or block, but polling sounds like a bit less overhead
				// at the OS level:
				
				// First enqueue the recently dequeued buffer...
				if (ar2VideoCapNext(capdev->camera) != DC1394_SUCCESS) {
					PsychErrorExitMsg(PsychError_system, "Requeuing of discarded video frame failed while dropping frames (dropframes=1)!!!");
				}
				
				// Then fetch the next one:
				if ((capdev->frame = ar2VideoGetImage(capdev->camera)) == NULL) {
					// Polling failed for some reason...
					PsychErrorExitMsg(PsychError_system, "Polling for new video frame failed while dropping frames (dropframes=1)!!!");
				}
				
			}
			
			// Update stats for decompression:
			PsychGetAdjustedPrecisionTimerSeconds(&tend);
			
			// Increase counter of decompressed frames:
			capdev->nrframes++;
			
			// Update avg. decompress time:
			capdev->avg_decompresstime+=(tend - tstart);
			
			// Query capture timestamp in seconds:
			// TODO: ARVideo doesn't provide such a timestamp. For now we just return the current
			// system time as a lame replacement...
			// On Windows there would be uint64 capdev->camera->g_Timestamp
			PsychGetAdjustedPrecisionTimerSeconds(&(capdev->current_pts));
		}

		// Return availability status: 0 = new frame ready for retrieval. -1 = No new frame ready yet.
		return((capdev->frame_ready) ? 0 : -1);
    }
    
    // This point is only reached if checkForImage == FALSE, which only happens
    // if a new frame is available in our buffer:
    
    // Presentation timestamp requested?
    if (presentation_timestamp) {
		// Return it:
		*presentation_timestamp = capdev->current_pts;
    }
	
    // Synchronous texture fetch: Copy content of capture buffer into a texture:
    // =========================================================================
	
    // input_image points to the image buffer in our cam:
    input_image = (unsigned char*) (capdev->frame);
	
    // Do we want to do something with the image data and have a
    // scratch buffer for color conversion alloc'ed?
    if ((capdev->scratchbuffer) && ((out_texture) || (summed_intensity) || (outrawbuffer))) {
		// Yes. Copy the frame from the camera's DMA buffer into the scratch
		// buffer and set the scratch buffer as source for all further operations:

		memcpy(capdev->scratchbuffer, input_image, capdev->width * capdev->height * bpp);
		
		// Ok, at this point we should have a RGB8 texture image ready in scratch_buffer.
		// Set scratch buffer as our new image source for all further processing:
		input_image = (unsigned char*) capdev->scratchbuffer;
    }
	
    // Only setup if really a texture is requested (non-benchmarking mode):
    if (out_texture) {
		PsychMakeRect(out_texture->rect, 0, 0, w, h);    
		
		// Set NULL - special texture object as part of the PTB texture record:
		out_texture->targetSpecific.QuickTimeGLTexture = NULL;
		
		// Set texture orientation as if it were an inverted Offscreen window: Upside-down.
		out_texture->textureOrientation = 3;
		
		#if PSYCH_SYSTEM == PSYCH_WINDOWS
		// On Windows in non RGB32 bit modes, set orientation to Upright:
		out_texture->textureOrientation = (capdev->reqpixeldepth == 4) ? 3 : 2;
		#endif

		// Setup a pointer to our buffer as texture data pointer: Setting memsize to zero
		// prevents unwanted free() operation in PsychDeleteTexture...
		out_texture->textureMemorySizeBytes = 0;
		
		// Set texture depth: Could be 8, 16, 24 or 32 bpp.
		out_texture->depth = capdev->reqpixeldepth * 8;
		
		// This will retrieve an OpenGL compatible pointer to the pixel data and assign it to our texmemptr:
		out_texture->textureMemory = (GLuint*) input_image;
		
		// Let PsychCreateTexture() do the rest of the job of creating, setting up and
		// filling an OpenGL texture with content:
		PsychCreateTexture(out_texture);
		
		// Ready to use the texture...
    }
    
    // Sum of pixel intensities requested?
    if(summed_intensity) {
		pixptr = (unsigned char*) input_image;
		count  = w * h * bpp;
		for (i=0; i<count; i++) intensity+=(unsigned int) pixptr[i];
		*summed_intensity = ((double) intensity) / w / h / bpp;
    }
	
	// Raw data requested?
	if (outrawbuffer) {
		// Copy it out:
		outrawbuffer->w = w;
		outrawbuffer->h = h;
		outrawbuffer->depth = bpp;
		count = (w * h * outrawbuffer->depth);
		memcpy(outrawbuffer->data, (const void *) input_image, count);
	}
	
    // Release the capture buffer. Return it to the DMA ringbuffer pool:
	if (ar2VideoCapNext(capdev->camera) != DC1394_SUCCESS) {
		PsychErrorExitMsg(PsychError_system, "Re-Enqueuing processed video frame failed.");
	}

    // Update total count of dropped (or pending) frames:
    capdev->nr_droppedframes += capdev->current_dropped;
    nrdropped = capdev->current_dropped;
    capdev->current_dropped = 0;
	
    // Timestamping:
    PsychGetAdjustedPrecisionTimerSeconds(&tend);
	
    // Increase counter of retrieved textures:
    capdev->nrgfxframes++;
	
    // Update average time spent in texture conversion:
    capdev->avg_gfxtime+=(tend - tstart);
    
    // We're successfully done! Return number of dropped (or pending in DMA ringbuffer) frames:
    return(nrdropped);
}
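The header comment above defines a two-phase protocol: poll with checkForImage > 0 until a frame is ready (return 0 = ready, -1 = not yet, -2 = never), then call again with checkForImage == 0 to convert the frame into a texture. A hedged sketch of a caller honouring that protocol is below; the wrapper name and the NULL-ed optional arguments are assumptions, not real Psychtoolbox code.
/* Hedged sketch of the two-phase fetch protocol documented above. The wrapper
 * name fetchNextARFrame and the simplified argument choices are illustrative. */
int fetchNextARFrame(PsychWindowRecordType *win, int capturehandle,
                     PsychWindowRecordType *out_texture, double *pts)
{
    int rc;

    /* Phase 1: poll (checkForImage = 1) until a new frame has been dequeued. */
    while ((rc = PsychARGetTextureFromCapture(win, capturehandle, 1, 0.0,
                                              NULL, NULL, NULL, NULL)) == -1) {
        /* No frame yet; a real caller would sleep or yield here instead of spinning. */
    }
    if (rc == -2) return -2;  /* Grabber stopped: no further frames will arrive. */

    /* Phase 2: retrieve (checkForImage = 0), building the texture and timestamp. */
    return PsychARGetTextureFromCapture(win, capturehandle, 0, 0.0,
                                        out_texture, pts, NULL, NULL);
}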