/* Push any queued commands out to the chip.  Buffered vertex and
 * indirect data must be kicked to the hardware explicitly; anything
 * emitted straight to the ring is treated as already flushed.
 */
static void r128DDFlush( GLcontext *ctx )
{
   r128ContextPtr r128ctx = R128_CONTEXT(ctx);

   FLUSH_BATCH( r128ctx );

#if ENABLE_PERF_BOXES
   /* When performance boxes are enabled, draw them under the
    * hardware lock, then dump the counters.
    */
   if ( r128ctx->boxes ) {
      LOCK_HARDWARE( r128ctx );
      r128PerformanceBoxesLocked( r128ctx );
      UNLOCK_HARDWARE( r128ctx );
   }

   /* Log the performance counters if necessary */
   r128PerformanceCounters( r128ctx );
#endif
}
void r128PageFlip( const __DRIdrawablePrivate *dPriv ) { r128ContextPtr rmesa; GLint ret; GLboolean missed_target; assert(dPriv); assert(dPriv->driContextPriv); assert(dPriv->driContextPriv->driverPrivate); rmesa = (r128ContextPtr) dPriv->driContextPriv->driverPrivate; if ( R128_DEBUG & DEBUG_VERBOSE_API ) { fprintf( stderr, "\n%s( %p ): page=%d\n\n", __FUNCTION__, (void *)rmesa->glCtx, rmesa->sarea->pfCurrentPage ); } FLUSH_BATCH( rmesa ); LOCK_HARDWARE( rmesa ); /* Throttle the frame rate -- only allow one pending swap buffers * request at a time. */ if ( !r128WaitForFrameCompletion( rmesa ) ) { rmesa->hardwareWentIdle = 1; } else { rmesa->hardwareWentIdle = 0; } UNLOCK_HARDWARE( rmesa ); driWaitForVBlank( dPriv, &rmesa->vbl_seq, rmesa->vblank_flags, &missed_target ); LOCK_HARDWARE( rmesa ); /* The kernel will have been initialized to perform page flipping * on a swapbuffers ioctl. */ ret = drmCommandNone( rmesa->driFd, DRM_R128_FLIP ); UNLOCK_HARDWARE( rmesa ); if ( ret ) { fprintf( stderr, "DRM_R128_FLIP: return = %d\n", ret ); exit( 1 ); } /* Get ready for drawing next frame. Update the renderbuffers' * flippedOffset/Pitch fields so we draw into the right place. */ driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer, rmesa->sarea->pfCurrentPage); rmesa->new_state |= R128_NEW_WINDOW; /* FIXME: Do we need this anymore? */ rmesa->new_state |= R128_NEW_CONTEXT; rmesa->dirty |= (R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS | R128_UPLOAD_CLIPRECTS); #if ENABLE_PERF_BOXES /* Log the performance counters if necessary */ r128PerformanceCounters( rmesa ); #endif }
/* Copy the back color buffer to the front color buffer.
 *
 * Flushes pending commands, throttles to one outstanding swap, waits
 * for vblank, then issues one DRM_R128_SWAP ioctl per batch of
 * cliprects (the SAREA holds at most R128_NR_SAREA_CLIPRECTS boxes
 * per ioctl).  On ioctl failure the process exits.
 */
void r128CopyBuffer( const __DRIdrawablePrivate *dPriv )
{
   r128ContextPtr rmesa;
   GLint nbox, i, ret;
   GLboolean missed_target;

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   rmesa = (r128ContextPtr) dPriv->driContextPriv->driverPrivate;

   if ( R128_DEBUG & DEBUG_VERBOSE_API ) {
      fprintf( stderr, "\n********************************\n" );
      fprintf( stderr, "\n%s( %p )\n\n",
	       __FUNCTION__, (void *)rmesa->glCtx );
      fflush( stderr );
   }

   FLUSH_BATCH( rmesa );

   LOCK_HARDWARE( rmesa );

   /* Throttle the frame rate -- only allow one pending swap buffers
    * request at a time.  hardwareWentIdle records whether the chip
    * drained completely while waiting (read by the perf-box code).
    */
   if ( !r128WaitForFrameCompletion( rmesa ) ) {
      rmesa->hardwareWentIdle = 1;
   } else {
      rmesa->hardwareWentIdle = 0;
   }
   UNLOCK_HARDWARE( rmesa );

   driWaitForVBlank( dPriv, &rmesa->vbl_seq, rmesa->vblank_flags,
		     &missed_target );

   LOCK_HARDWARE( rmesa );

   /* Snapshot the cliprect count while holding the lock, so it
    * cannot change under us mid-swap.
    */
   nbox = dPriv->numClipRects; /* must be in locked region */

   /* Upload cliprects to the SAREA in batches of at most
    * R128_NR_SAREA_CLIPRECTS and fire one swap ioctl per batch.
    * The outer loop's index is advanced by the inner copy loop.
    */
   for ( i = 0 ; i < nbox ; ) {
      GLint nr = MIN2( i + R128_NR_SAREA_CLIPRECTS , nbox );
      drm_clip_rect_t *box = dPriv->pClipRects;
      drm_clip_rect_t *b = rmesa->sarea->boxes;
      GLint n = 0;

      /* Struct-copy this batch of boxes into the shared area. */
      for ( ; i < nr ; i++ ) {
	 *b++ = box[i];
	 n++;
      }
      rmesa->sarea->nbox = n;

      ret = drmCommandNone( rmesa->driFd, DRM_R128_SWAP );

      if ( ret ) {
	 /* Drop the lock before exiting so we don't wedge the DRM. */
	 UNLOCK_HARDWARE( rmesa );
	 fprintf( stderr, "DRM_R128_SWAP: return = %d\n", ret );
	 exit( 1 );
      }
   }

   /* In always-sync debug mode, spin until the CCE is idle (retry
    * while the kernel reports EBUSY, up to R128_IDLE_RETRY times).
    */
   if ( R128_DEBUG & DEBUG_ALWAYS_SYNC ) {
      i = 0;
      do {
	 ret = drmCommandNone(rmesa->driFd, DRM_R128_CCE_IDLE);
      } while ( ret && errno == EBUSY && i++ < R128_IDLE_RETRY );
   }

   UNLOCK_HARDWARE( rmesa );

   /* The swap clobbered shared state; force a re-upload next draw. */
   rmesa->new_state |= R128_NEW_CONTEXT;
   rmesa->dirty |= (R128_UPLOAD_CONTEXT |
		    R128_UPLOAD_MASKS |
		    R128_UPLOAD_CLIPRECTS);

#if ENABLE_PERF_BOXES
   /* Log the performance counters if necessary */
   r128PerformanceCounters( rmesa );
#endif
}