void CWebCam::Display(ARGL_CONTEXT_SETTINGS_REF arglSettings)
{
    // Nothing to do until a captured frame is available:
    if(!ARTImage) return;

    // Draw the current video frame as the background, if requested:
    if(dispImage) arglDispImage(ARTImage, &ARTCparam, 1.0, arglSettings);

    if(pattFound)
    {
        // A marker was found: load the camera projection and render the overlay via Draw().
        glMatrixMode(GL_PROJECTION);
        glLoadMatrixd(projectionMat);

        this->Draw();
    }

    // Hand the frame buffer back to the capture driver so the next frame can be grabbed:
    ar2VideoCapNext(ARTVideo);
}
int CWebCam::SetupWebCam(const char *cparam_names, char *vconfs)
{
    int     xsize, ysize;
    ARParam wparam;

    // Open the video path and query the actual capture frame size:
    if((ARTVideo = ar2VideoOpen(vconfs)) == 0) return(0);
    if(ar2VideoInqSize(ARTVideo, &xsize, &ysize) < 0) return(0);

    // Load the camera calibration parameters and scale them to the frame size:
    if(arParamLoad(cparam_names, 1, &wparam) < 0) return(0);
    arParamChangeSize(&wparam, xsize, ysize, &ARTCparam);
    arInitCparam(&ARTCparam);
    arParamDisp(&ARTCparam);

    // Default binarization threshold for marker detection:
    ARTThreshhold = 100;

    // Build an OpenGL projection matrix from the camera parameters:
    arglCameraFrustumRH(&ARTCparam, VIEW_DISTANCE_MIN, VIEW_DISTANCE_MAX, projectionMat);

    // Start capturing and queue the first frame grab:
    if(ar2VideoCapStart(ARTVideo) != 0) return(0);
    ar2VideoCapNext(ARTVideo);

    return(1);
}
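For orientation, here is a minimal, hypothetical sketch of how the two methods above might be driven from a GLUT-style application: SetupWebCam() once at startup, Display() once per rendered frame. The parameter file name, video config string, the gWebCam/gArglSettings globals, and the call to ARToolKit 2.x's arglSetupForCurrentContext() are illustrative assumptions, not part of the class shown here.

// Hypothetical usage sketch (not part of CWebCam). Camera parameter file, video
// config string, and the gsub_lite settings setup are illustrative assumptions.
static CWebCam                   gWebCam;
static ARGL_CONTEXT_SETTINGS_REF gArglSettings = NULL;

static void InitCapture(void)
{
    // Camera parameter file and video config string are placeholders:
    if(!gWebCam.SetupWebCam("Data/camera_para.dat", (char *)"-width=640 -height=480"))
        exit(EXIT_FAILURE);

    // gsub_lite needs a current OpenGL context before this call (ARToolKit 2.x API assumed):
    gArglSettings = arglSetupForCurrentContext();
}

static void DisplayCallback(void)
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    gWebCam.Display(gArglSettings);   // video background plus overlay, as defined above
    glutSwapBuffers();
}

Note that Display() only consumes whatever ARTImage and pattFound already hold, so the per-frame grab and marker detection presumably happen in another CWebCam method that is not shown in this listing.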
int arVideoCapNext( void )
{
    if( vid == NULL ) return -1;

    return ar2VideoCapNext( vid );
}
/* CHECKED TODO
 * PsychARGetTextureFromCapture() -- Create an OpenGL texturemap from a specific videoframe from given capture object.
 *
 * win = Window pointer of onscreen window for which a OpenGL texture should be created.
 * capturehandle = Handle to the capture object.
 * checkForImage = >0 == Just check if new image available, 0 == really retrieve the image, blocking if necessary.
 *                 2 == Check for new image, block inside this function (if possible) if no image available.
 *
 * timeindex = This parameter is currently ignored and reserved for future use.
 * out_texture = Pointer to the Psychtoolbox texture-record where the new texture should be stored.
 * presentation_timestamp = A ptr to a double variable, where the presentation timestamp of the returned frame should be stored.
 * summed_intensity = An optional ptr to a double variable. If non-NULL, then sum of intensities over all channels is calculated and returned.
 * outrawbuffer = An optional ptr to a memory buffer of sufficient size. If non-NULL, the buffer will be filled with the captured raw image data,
 *                e.g., for use inside Matlab or whatever...
 *
 * Returns Number of pending or dropped frames after fetch on success (>=0), -1 if no new image available yet,
 * -2 if no new image available and there won't be any in future.
 */
int PsychARGetTextureFromCapture(PsychWindowRecordType *win, int capturehandle, int checkForImage,
                                 double timeindex, PsychWindowRecordType *out_texture, double *presentation_timestamp,
                                 double* summed_intensity, rawcapimgdata* outrawbuffer)
{
    GLuint texid;
    int w, h;
    double targetdelta, realdelta, frames;
    unsigned int intensity = 0;
    unsigned int count, i, bpp;
    unsigned char* pixptr;
    psych_bool newframe = FALSE;
    double tstart, tend;
    unsigned int pixval, alphacount;
    int error;
    int nrdropped = 0;
    unsigned char* input_image = NULL;

    // Retrieve device record for handle:
    PsychVidcapRecordType* capdev = PsychGetARVidcapRecord(capturehandle);

    // Compute width and height for later creation of textures etc. Need to do this here,
    // so we can return the values for raw data retrieval:
    w = capdev->width;
    h = capdev->height;

    // Size of a single pixel in bytes:
    bpp = capdev->reqpixeldepth;

    // If a outrawbuffer struct is provided, we fill it with info needed to allocate a
    // sufficient memory buffer for returned raw image data later on:
    if (outrawbuffer) {
        outrawbuffer->w = w;
        outrawbuffer->h = h;
        outrawbuffer->depth = bpp;
    }

    // int waitforframe = (checkForImage > 1) ? 1:0; // Blocking wait for new image requested?

    // A checkForImage 4 means "no op" with the ARVideo capture engine: This is meant to drive
    // a movie recording engine, ie., grant processing time to it. Our ARVideo engine doesn't
    // support movie recording, so this is a no-op:
    if (checkForImage == 4) return(0);

    // Take start timestamp for timing stats:
    PsychGetAdjustedPrecisionTimerSeconds(&tstart);

    // Should we just check for new image?
    if (checkForImage) {
        // Reset current dropped count to zero:
        capdev->current_dropped = 0;

        if (capdev->grabber_active == 0) {
            // Grabber stopped. We'll never get a new image:
            return(-2);
        }

        // Check for image in polling mode: We capture in non-blocking mode:
        capdev->frame = ar2VideoGetImage(capdev->camera);

        // Ok, call succeeded. If the 'frame' pointer is non-NULL then there's a new frame ready and dequeued from DMA
        // ringbuffer. We'll return it on next non-poll invocation. Otherwise no new video data ready yet:
        capdev->frame_ready = (capdev->frame != NULL) ? 1 : 0;
        if (capdev->frame_ready) {
            // Store count of currently queued frames (in addition to the one just fetched).
            // This is an indication of how well the users script is keeping up with the video stream,
            // technically the number of frames that would need to be dropped to keep in sync with the stream.
            // TODO: Think about this. ARVideo doesn't support a query for pending/dropped frames, so
            // we either need to live without this feature or think up something clever...
            capdev->current_dropped = (int) 0;

            // Ok, at least one new frame ready. If more than one frame has queued up and
            // we are in 'dropframes' mode, ie. we should always deliver the most recent available
            // frame, then we quickly fetch & discard all queued frames except the last one.
            while((capdev->dropframes) && ((int) capdev->current_dropped > 0)) {
                // We just poll - fetch the frames. As we know there are some queued frames, it
                // doesn't matter if we poll or block, but polling sounds like a bit less overhead
                // at the OS level:

                // First enqueue the recently dequeued buffer...
                if (ar2VideoCapNext(capdev->camera) != DC1394_SUCCESS) {
                    PsychErrorExitMsg(PsychError_system, "Requeuing of discarded video frame failed while dropping frames (dropframes=1)!!!");
                }

                // Then fetch the next one:
                if ((capdev->frame = ar2VideoGetImage(capdev->camera)) == NULL) {
                    // Polling failed for some reason...
                    PsychErrorExitMsg(PsychError_system, "Polling for new video frame failed while dropping frames (dropframes=1)!!!");
                }
            }

            // Update stats for decompression:
            PsychGetAdjustedPrecisionTimerSeconds(&tend);

            // Increase counter of decompressed frames:
            capdev->nrframes++;

            // Update avg. decompress time:
            capdev->avg_decompresstime += (tend - tstart);

            // Query capture timestamp in seconds:
            // TODO: ARVideo doesn't provide such a timestamp. For now we just return the current
            // system time as a lame replacement...
            // On Windows there would be uint64 capdev->camera->g_Timestamp
            PsychGetAdjustedPrecisionTimerSeconds(&(capdev->current_pts));
        }

        // Return availability status: 0 = new frame ready for retrieval. -1 = No new frame ready yet.
        return((capdev->frame_ready) ? 0 : -1);
    }

    // This point is only reached if checkForImage == FALSE, which only happens
    // if a new frame is available in our buffer:

    // Presentation timestamp requested?
    if (presentation_timestamp) {
        // Return it:
        *presentation_timestamp = capdev->current_pts;
    }

    // Synchronous texture fetch: Copy content of capture buffer into a texture:
    // =========================================================================

    // input_image points to the image buffer in our cam:
    input_image = (unsigned char*) (capdev->frame);

    // Do we want to do something with the image data and have a
    // scratch buffer for color conversion alloc'ed?
    if ((capdev->scratchbuffer) && ((out_texture) || (summed_intensity) || (outrawbuffer))) {
        // Yes. Perform color-conversion YUV->RGB from cameras DMA buffer
        // into the scratch buffer and set scratch buffer as source for
        // all further operations:
        memcpy(capdev->scratchbuffer, input_image, capdev->width * capdev->height * bpp);

        // Ok, at this point we should have a RGB8 texture image ready in scratch_buffer.
        // Set scratch buffer as our new image source for all further processing:
        input_image = (unsigned char*) capdev->scratchbuffer;
    }

    // Only setup if really a texture is requested (non-benchmarking mode):
    if (out_texture) {
        PsychMakeRect(out_texture->rect, 0, 0, w, h);

        // Set NULL - special texture object as part of the PTB texture record:
        out_texture->targetSpecific.QuickTimeGLTexture = NULL;

        // Set texture orientation as if it were an inverted Offscreen window: Upside-down.
        out_texture->textureOrientation = 3;

#if PSYCH_SYSTEM == PSYCH_WINDOWS
        // On Windows in non RGB32 bit modes, set orientation to Upright:
        out_texture->textureOrientation = (capdev->reqpixeldepth == 4) ? 3 : 2;
#endif

        // Setup a pointer to our buffer as texture data pointer: Setting memsize to zero
        // prevents unwanted free() operation in PsychDeleteTexture...
        out_texture->textureMemorySizeBytes = 0;

        // Set texture depth: Could be 8, 16, 24 or 32 bpp.
        out_texture->depth = capdev->reqpixeldepth * 8;

        // This will retrieve an OpenGL compatible pointer to the pixel data and assign it to our texmemptr:
        out_texture->textureMemory = (GLuint*) input_image;

        // Let PsychCreateTexture() do the rest of the job of creating, setting up and
        // filling an OpenGL texture with content:
        PsychCreateTexture(out_texture);

        // Ready to use the texture...
    }

    // Sum of pixel intensities requested?
    if (summed_intensity) {
        pixptr = (unsigned char*) input_image;
        count = w * h * bpp;
        for (i = 0; i < count; i++) intensity += (unsigned int) pixptr[i];
        *summed_intensity = ((double) intensity) / w / h / bpp;
    }

    // Raw data requested?
    if (outrawbuffer) {
        // Copy it out:
        outrawbuffer->w = w;
        outrawbuffer->h = h;
        outrawbuffer->depth = bpp;
        count = (w * h * outrawbuffer->depth);
        memcpy(outrawbuffer->data, (const void *) input_image, count);
    }

    // Release the capture buffer. Return it to the DMA ringbuffer pool:
    if (ar2VideoCapNext(capdev->camera) != DC1394_SUCCESS) {
        PsychErrorExitMsg(PsychError_system, "Re-Enqueuing processed video frame failed.");
    }

    // Update total count of dropped (or pending) frames:
    capdev->nr_droppedframes += capdev->current_dropped;
    nrdropped = capdev->current_dropped;
    capdev->current_dropped = 0;

    // Timestamping:
    PsychGetAdjustedPrecisionTimerSeconds(&tend);

    // Increase counter of retrieved textures:
    capdev->nrgfxframes++;

    // Update average time spent in texture conversion:
    capdev->avg_gfxtime += (tend - tstart);

    // We're successfully done! Return number of dropped (or pending in DMA ringbuffer) frames:
    return(nrdropped);
}
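To make the contract in the header comment above concrete, the hedged sketch below shows the two-phase poll-then-fetch sequence a caller would use. The helper name is made up for illustration, and the assumption that win, capturehandle, tex, and pts already exist from the regular capture setup path is not part of the code above.

/* Hypothetical helper (not part of the engine) illustrating the documented call contract:
 * poll with checkForImage > 0, then fetch with checkForImage == 0 once a frame is ready.
 * 'win', 'capturehandle', 'tex' and 'pts' are assumed to come from the usual capture
 * setup code, which is not shown here. */
static int FetchLatestFrameExample(PsychWindowRecordType *win, int capturehandle,
                                   PsychWindowRecordType *tex, double *pts)
{
    /* Phase 1: non-blocking poll. Returns 0 = frame ready, -1 = none yet, -2 = grabber stopped. */
    int rc = PsychARGetTextureFromCapture(win, capturehandle, 1, 0.0, NULL, NULL, NULL, NULL);
    if (rc != 0) return(rc);

    /* Phase 2: really retrieve the frame into 'tex' and store its timestamp in 'pts'.
     * On success this returns the number of pending/dropped frames (>= 0). */
    return(PsychARGetTextureFromCapture(win, capturehandle, 0, 0.0, tex, pts, NULL, NULL));
}

A caller that gets -1 back from the poll would typically just retry on the next iteration rather than treating it as an error; only -2 signals that no further frames will ever arrive.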