/* Break the COW tie to the pbo. Both the pbo and the region end up * with a copy of the data. */ void intel_region_cow(struct intel_context *intel, struct intel_region *region) { struct intel_buffer_object *pbo = region->pbo; GLboolean ok; intel_region_release_pbo(intel, region); assert(region->cpp * region->pitch * region->height == pbo->Base.Size); _DBG("%s %p (%d bytes)\n", __FUNCTION__, region, pbo->Base.Size); /* Now blit from the texture buffer to the new buffer: */ LOCK_HARDWARE(intel); ok = intelEmitCopyBlit(intel, region->cpp, region->pitch, pbo->buffer, 0, region->tiling, region->pitch, region->buffer, 0, region->tiling, 0, 0, 0, 0, region->pitch, region->height, GL_COPY); assert(ok); UNLOCK_HARDWARE(intel); }
/* Prepare for CPU span access: push out any queued vertices, take the
 * hardware lock, and stall until the engine is idle.  The lock is held
 * on return so span reads/writes are safe.
 */
void r128SpanRenderStart( GLcontext *ctx )
{
   r128ContextPtr r128ctx = R128_CONTEXT(ctx);

   FLUSH_BATCH(r128ctx);
   LOCK_HARDWARE(r128ctx);
   r128WaitForIdleLocked( r128ctx );
}
/* Upload data to a rectangular sub-region.  Lots of choices how to do this:
 *
 * - memcpy by span to current destination
 * - upload data as new buffer and blit
 *
 * Currently always memcpy.
 */
void
intel_region_data(struct intel_context *intel,
                  struct intel_region *dst,
                  GLuint dst_offset,
                  GLuint dstx, GLuint dsty,
                  const void *src, GLuint src_pitch,
                  GLuint srcx, GLuint srcy, GLuint width, GLuint height)
{
   _DBG("%s\n", __FUNCTION__);

   if (intel == NULL)
      return;

   if (dst->pbo) {
      /* Overwriting the whole region lets us simply drop the PBO tie;
       * a partial write must first copy the PBO data out (break COW).
       */
      GLboolean whole_region = (dstx == 0 && dsty == 0 &&
                                width == dst->pitch &&
                                height == dst->height);

      if (whole_region)
         intel_region_release_pbo(intel, dst);
      else
         intel_region_cow(intel, dst);
   }

   LOCK_HARDWARE(intel);

   _mesa_copy_rect(intel_region_map(intel, dst) + dst_offset,
                   dst->cpp,
                   dst->pitch,
                   dstx, dsty, width, height, src, src_pitch, srcx, srcy);

   intel_region_unmap(intel, dst);

   UNLOCK_HARDWARE(intel);
}
/* Begin software span access for the SiS driver: flush queued vertices,
 * take the hardware lock, and wait for the engine to idle.  The lock is
 * not released here; presumably a matching SpanRenderFinish hook drops
 * it -- verify against the driver's span setup.
 *
 * NOTE(review): LOCK_HARDWARE() takes no argument here and appears to
 * expand using the local variable `smesa' -- confirm in the SiS lock
 * header before renaming that local.
 */
void sisSpanRenderStart( GLcontext *ctx )
{
   sisContextPtr smesa = SIS_CONTEXT(ctx);

   SIS_FIREVERTICES(smesa);   /* flush any pending vertex data first */
   LOCK_HARDWARE();
   WaitEngIdle( smesa );      /* engine must be idle before CPU access */
}
/* Flush buffered primitives to the hardware.  A no-op when there is no
 * vertex buffer outstanding; otherwise performs the flush under the
 * hardware lock.
 */
void i830FlushPrims( i830ContextPtr imesa )
{
   if (!imesa->vertex_buffer)
      return;

   LOCK_HARDWARE( imesa );
   i830FlushPrimsLocked( imesa );
   UNLOCK_HARDWARE( imesa );
}
/* This waits for *everybody* to finish rendering -- overkill.
 *
 * Fires any pending vertices, then re-acquires the lock quiescent,
 * which stalls until all outstanding DMA has completed.
 */
void i810DmaFinish( i810ContextPtr imesa )
{
   I810_FIREVERTICES( imesa );

   LOCK_HARDWARE( imesa );
   i810RegetLockQuiescent( imesa );
   UNLOCK_HARDWARE( imesa );
}
/* Begin software span access: submit the pending command ring, take the
 * hardware lock, and wait for the GPU to idle.  The lock stays held so
 * spans can be read/written safely; presumably the matching
 * SpanRenderFinish hook releases it -- verify.
 *
 * NOTE(review): FIRE_RING() takes no argument and appears to expand
 * using the local `nmesa' -- confirm in the nouveau headers before
 * renaming that local.
 */
static void nouveauSpanRenderStart( GLcontext *ctx )
{
   nouveauContextPtr nmesa = NOUVEAU_CONTEXT(ctx);

   FIRE_RING();
   LOCK_HARDWARE(nmesa);
   nouveauWaitForIdleLocked( nmesa );
}
/* Hardware glFinish: stall under the lock until the FFB raster engine
 * has drained.
 */
static void ffbDDFinish(GLcontext *ctx)
{
   ffbContextPtr ffb = FFB_CONTEXT(ctx);

   LOCK_HARDWARE(ffb);
   FFBWait(ffb, ffb->regs);
   UNLOCK_HARDWARE(ffb);
}
/* Move locking out to get reasonable span performance.
 *
 * Flushes queued rendering, then takes the hardware lock and waits for
 * idle; the lock is held for the duration of span access.
 */
void intelSpanRenderStart( GLcontext *ctx )
{
   intelContextPtr ictx = INTEL_CONTEXT(ctx);

   intelFlush(&ictx->ctx);
   LOCK_HARDWARE(ictx);
   intelWaitForIdle(ictx);
}
/* Begin software span access on r300-class hardware: flush rendering
 * (r300Flush() here replaces the R300_FIREVERTICES() used by older
 * code), grab the hardware lock, and wait for the chip to go idle.
 */
static void radeonSpanRenderStart( GLcontext *ctx )
{
   radeonContextPtr radeon = RADEON_CONTEXT( ctx );

   r300Flush(ctx);
   LOCK_HARDWARE( radeon );
   radeonWaitForIdleLocked( radeon );
}
/* Ensure texture object \a t has texture memory allocated and upload any
 * dirty mipmap levels for \a face.
 *
 * Returns 0 on success (including the trivial NULL/empty case), -1 if
 * texture memory could not be allocated.
 */
int radeonUploadTexImages( radeonContextPtr rmesa, radeonTexObjPtr t,
                           GLuint face )
{
   int numLevels;

   /* BUGFIX: check t before any dereference.  The original computed
    * numLevels and printed debug info from t->base.* first, which
    * crashes when t is NULL.
    */
   if ( !t || t->base.totalSize == 0 )
      return 0;

   numLevels = t->base.lastLevel - t->base.firstLevel + 1;

   if ( RADEON_DEBUG & (DEBUG_TEXTURE|DEBUG_IOCTL) ) {
      fprintf( stderr, "%s( %p, %p ) sz=%d lvls=%d-%d\n", __FUNCTION__,
               (void *)rmesa->glCtx, (void *)t->base.tObj, t->base.totalSize,
               t->base.firstLevel, t->base.lastLevel );
   }

   LOCK_HARDWARE( rmesa );

   if ( t->base.memBlock == NULL ) {
      int heap;

      heap = driAllocateTexture( rmesa->texture_heaps, rmesa->nr_heaps,
                                 (driTextureObject *) t );
      if ( heap == -1 ) {
         UNLOCK_HARDWARE( rmesa );
         return -1;
      }

      /* Set the base offset of the texture image */
      t->bufAddr = rmesa->radeonScreen->texOffset[heap] +
         t->base.memBlock->ofs;
      t->pp_txoffset = t->bufAddr;

      /* Mark this texobj as dirty on all units:
       */
      t->dirty_state = TEX_ALL;
   }

   /* Let the world know we've used this memory recently.
    */
   driUpdateTextureLRU( (driTextureObject *) t );
   UNLOCK_HARDWARE( rmesa );

   /* Upload any images that are new */
   if (t->base.dirty_images[face]) {
      int i;
      for ( i = 0 ; i < numLevels ; i++ ) {
         if ( (t->base.dirty_images[face] & (1 << (i+t->base.firstLevel))) != 0 ) {
            uploadSubImage( rmesa, t, i, 0, 0,
                            t->image[face][i].width,
                            t->image[face][i].height,
                            face );
         }
      }
      t->base.dirty_images[face] = 0;
   }

   return 0;
}
/* Report the size of the current drawable, read under the hardware lock
 * so it is consistent with any in-flight resize.
 */
static void ffbBufferSize(GLframebuffer *buffer, GLuint *width, GLuint *height)
{
   GET_CURRENT_CONTEXT(ctx);
   ffbContextPtr ffb = FFB_CONTEXT(ctx);

   LOCK_HARDWARE(ffb);
   *width  = ffb->driDrawable->w;
   *height = ffb->driDrawable->h;
   UNLOCK_HARDWARE(ffb);
}
/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 *
 * NOTE(review): LOCK_HARDWARE()/UNLOCK_HARDWARE() take no argument and
 * appear to expand using the local `smesa' -- confirm in the SiS lock
 * header before renaming that local.
 */
static void sisFinish( GLcontext *ctx )
{
   sisContextPtr smesa = SIS_CONTEXT(ctx);

   SIS_FIREVERTICES(smesa);   /* flush any queued vertices */
   LOCK_HARDWARE();
   WaitEngIdle( smesa );      /* stall until the engine drains */
   UNLOCK_HARDWARE();
}
/* Even when doing full software rendering we need to
 * wrap render{start,finish} so that the hardware is kept
 * in sync (because multipass rendering changes the write
 * buffer etc.)
 *
 * Note the asymmetry: the hardware lock is taken here and deliberately
 * NOT released -- presumably the matching SWRenderFinish hook (not in
 * view) clears hw_locked and unlocks.  Verify before reordering.
 */
static void ffbSWRenderStart(GLcontext *ctx)
{
   ffbContextPtr fmesa = FFB_CONTEXT(ctx);

   LOCK_HARDWARE(fmesa);
   fmesa->hw_locked = 1;                /* remember we hold the lock */

   /* Push any dirty software state to the chip before spans touch it. */
   if (fmesa->state_dirty != 0)
      ffbSyncHardware(fmesa);
}
/* Send all commands to the hardware.  If vertex buffers or indirect
 * buffers are in use, then we need to make sure they are sent to the
 * hardware.  All commands that are normally sent to the ring are
 * already considered `flushed'.
 */
static void mach64DDFlush( GLcontext *ctx )
{
   mach64ContextPtr mach64 = MACH64_CONTEXT(ctx);

   LOCK_HARDWARE( mach64 );
   FLUSH_DMA_LOCKED( mach64 );
   UNLOCK_HARDWARE( mach64 );

#if ENABLE_PERF_BOXES
   if ( mach64->boxes ) {
      LOCK_HARDWARE( mach64 );
      mach64PerformanceBoxesLocked( mach64 );
      UNLOCK_HARDWARE( mach64 );
   }

   /* Log the performance counters if necessary */
   mach64PerformanceCounters( mach64 );
#endif
}
/* Wait until the hardware dispatch age reaches at least `age'.
 *
 * Double-checked pattern: the cheap unlocked test avoids taking the
 * heavyweight lock in the common already-retired case; the test is then
 * repeated under the lock because the age may have advanced while we
 * were acquiring it.  The check/lock/recheck order is essential -- do
 * not simplify.
 */
void mgaWaitAge( mgaContextPtr mmesa, int age )
{
   if (GET_DISPATCH_AGE(mmesa) < age) {
      LOCK_HARDWARE(mmesa);
      /* Re-check under the lock before forcing a flush. */
      if (GET_DISPATCH_AGE(mmesa) < age) {
         UPDATE_LOCK( mmesa, DRM_LOCK_FLUSH );
      }
      UNLOCK_HARDWARE(mmesa);
   }
}
/* Hardware glFlush: push batched commands out, then ask Glide to flush
 * its own pipeline while we hold the hardware lock.
 */
static void tdfxFlush( GLcontext *ctx )
{
   tdfxContextPtr tdfx = TDFX_CONTEXT(ctx);

   FLUSH_BATCH( tdfx );

   LOCK_HARDWARE( tdfx );
   tdfx->Glide.grFlush();
   UNLOCK_HARDWARE( tdfx );
}
/* Try to perform a glTexImage upload from a bound pixel-unpack buffer
 * object (PBO) with a hardware blit instead of a CPU copy.
 *
 * Returns GL_TRUE if the blit path was used; GL_FALSE means the caller
 * must fall back to the ordinary CPU upload (no PBO bound, pixel
 * transfer ops active, or SKIP_PIXELS/SKIP_ROWS requested).
 *
 * XXX: Do this for TexSubImage also:
 */
static GLboolean
try_pbo_upload(struct intel_context *intel,
               struct intel_texture_image *intelImage,
               const struct gl_pixelstore_attrib *unpack,
               GLint internalFormat,
               GLint width, GLint height,
               GLenum format, GLenum type, const void *pixels)
{
   struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
   GLuint src_offset, src_stride;
   GLuint dst_offset, dst_stride;

   if (!pbo ||
       intel->ctx._ImageTransferState ||
       unpack->SkipPixels || unpack->SkipRows) {
      _mesa_printf("%s: failure 1\n", __FUNCTION__);
      return GL_FALSE;
   }

   /* With a PBO bound, `pixels' is a byte offset into the buffer, not a
    * real pointer.  BUGFIX: cast through (unsigned long) -- a direct
    * (GLuint) cast of a pointer is truncating/ill-formed on 64-bit
    * builds.  PBO offsets themselves fit in 32 bits.
    */
   src_offset = (GLuint) (unsigned long) pixels;

   if (unpack->RowLength > 0)
      src_stride = unpack->RowLength;
   else
      src_stride = width;

   dst_offset = intel_miptree_image_offset(intelImage->mt,
                                           intelImage->face,
                                           intelImage->level);

   dst_stride = intelImage->mt->pitch;

   intelFlush(&intel->ctx);
   LOCK_HARDWARE(intel);
   {
      struct _DriBufferObject *src_buffer =
         intel_bufferobj_buffer(intel, pbo, INTEL_READ);
      struct _DriBufferObject *dst_buffer =
         intel_region_buffer(intel->intelScreen, intelImage->mt->region,
                             INTEL_WRITE_FULL);

      intelEmitCopyBlit(intel,
                        intelImage->mt->cpp,
                        src_stride, src_buffer, src_offset,
                        dst_stride, dst_buffer, dst_offset,
                        0, 0, 0, 0, width, height, GL_COPY);

      intel_batchbuffer_flush(intel->batch);
   }
   UNLOCK_HARDWARE(intel);

   return GL_TRUE;
}
/* Flush the batch buffer.  If the hardware lock is already held, flush
 * in place; otherwise take the lock for the duration of the flush.
 */
void intelFlushBatch( intelContextPtr intel, GLboolean refill )
{
   if (intel->locked) {
      intelFlushBatchLocked( intel, GL_FALSE, refill, GL_FALSE );
      return;
   }

   LOCK_HARDWARE(intel);
   intelFlushBatchLocked( intel, GL_FALSE, refill, GL_TRUE );
   UNLOCK_HARDWARE(intel);
}
/* Return the width and height of the current color buffer, read under
 * the hardware lock so the pair is consistent.
 */
static void nouveauGetBufferSize( GLframebuffer *buffer,
                                  GLuint *width, GLuint *height )
{
   GET_CURRENT_CONTEXT(ctx);
   nouveauContextPtr nv = NOUVEAU_CONTEXT(ctx);

   LOCK_HARDWARE( nv );
   *width  = nv->driDrawable->w;
   *height = nv->driDrawable->h;
   UNLOCK_HARDWARE( nv );
}
/* Return the width and height of the current color buffer; the drawable
 * is sampled under the hardware lock for a consistent w/h pair.
 */
static void r128DDGetBufferSize( GLframebuffer *buffer,
                                 GLuint *width, GLuint *height )
{
   GET_CURRENT_CONTEXT(ctx);
   r128ContextPtr r128ctx = R128_CONTEXT(ctx);

   LOCK_HARDWARE( r128ctx );
   *width  = r128ctx->driDrawable->w;
   *height = r128ctx->driDrawable->h;
   UNLOCK_HARDWARE( r128ctx );
}
/* Return the current color buffer size, sampled under the hardware
 * lock so width and height match the same drawable state.
 */
static void mach64DDGetBufferSize( GLframebuffer *buffer,
                                   GLuint *width, GLuint *height )
{
   GET_CURRENT_CONTEXT(ctx);
   mach64ContextPtr mach64 = MACH64_CONTEXT(ctx);

   LOCK_HARDWARE( mach64 );
   *width  = mach64->driDrawable->w;
   *height = mach64->driDrawable->h;
   UNLOCK_HARDWARE( mach64 );
}
/* Blit user pixels (already resident in AGP space) into the draw buffer.
 *
 * `pixels' must be AGP-mapped memory: intelAgpOffsetFromVirtual() turns
 * the virtual address into a card offset and the assert guards against
 * failure (callers are expected to have validated this).  The blit is
 * clipped against both the GL scissor/drawbuffer (clip_pixelrect) and
 * the DRI cliprect list, with a GL->hardware Y flip in between -- the
 * coordinate order below is delicate, do not reorder.
 *
 * NOTE(review): `dest' is unused here -- presumably selects a target
 * buffer in a fuller implementation; confirm against callers.
 */
static void
do_draw_pix( GLcontext *ctx,
	     GLint x, GLint y, GLsizei width, GLsizei height,
	     GLint pitch, const void *pixels, GLuint dest )
{
   intelContextPtr intel = INTEL_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   drm_clip_rect_t *box = dPriv->pClipRects;
   int nbox = dPriv->numClipRects;
   int i;
   int src_offset = intelAgpOffsetFromVirtual( intel, pixels);
   int src_pitch = pitch;

   assert(src_offset != ~0);  /* should be caught earlier */

   if (INTEL_DEBUG & DEBUG_PIXEL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   intelFlush( &intel->ctx );
   LOCK_HARDWARE( intel );

   if (ctx->DrawBuffer)
   {
      y -= height;			/* cope with pixel zoom */

      if (!clip_pixelrect(ctx, ctx->DrawBuffer,
			  &x, &y, &width, &height))
      {
	 /* Fully scissored away -- nothing to draw. */
	 UNLOCK_HARDWARE( intel );
	 return;
      }

      y = dPriv->h - y - height; 	/* convert from gl to hardware coords */
      x += dPriv->x;
      y += dPriv->y;

      /* Emit one blit per DRI cliprect intersecting the target rect. */
      for (i = 0 ; i < nbox ; i++ )
      {
	 GLint bx, by, bw, bh;

	 if (intersect_region(box + i, x, y, width, height,
			      &bx, &by, &bw, &bh))
	 {
	    intelEmitCopyBlitLocked( intel,
				     intel->intelScreen->cpp,
				     src_pitch, src_offset,
				     intel->intelScreen->front.pitch,
				     intel->drawRegion->offset,
				     bx - x, by - y,
				     bx, by,
				     bw, bh );
	 }
      }
   }
   UNLOCK_HARDWARE( intel );
   intelFinish( &intel->ctx );
}
/* Return the width and height of the given buffer, read under the
 * hardware lock to get a consistent snapshot of the drawable.
 */
static void radeonGetBufferSize( GLframebuffer *buffer,
                                 GLuint *width, GLuint *height )
{
   GET_CURRENT_CONTEXT(ctx);
   radeonContextPtr radeon = RADEON_CONTEXT(ctx);

   LOCK_HARDWARE( radeon );
   *width  = radeon->dri.drawable->w;
   *height = radeon->dri.drawable->h;
   UNLOCK_HARDWARE( radeon );
}
/* Return up-to-date buffer size information.  Unlike the other drivers
 * this reads cached context fields rather than the DRI drawable, still
 * under the hardware lock for consistency.
 */
static void tdfxDDGetBufferSize( GLframebuffer *buffer,
                                 GLuint *width, GLuint *height )
{
   GET_CURRENT_CONTEXT(ctx);
   tdfxContextPtr tdfx = TDFX_CONTEXT(ctx);

   LOCK_HARDWARE( tdfx );
   *width  = tdfx->width;
   *height = tdfx->height;
   UNLOCK_HARDWARE( tdfx );
}
/* Return the width and height of the current color buffer, sampled
 * under the hardware lock.
 */
static void tridentDDGetBufferSize( GLframebuffer *framebuffer,
                                    GLuint *width, GLuint *height )
{
   GET_CURRENT_CONTEXT(ctx);
   tridentContextPtr trident = TRIDENT_CONTEXT(ctx);

   LOCK_HARDWARE(trident);
   *width  = trident->driDrawable->w;
   *height = trident->driDrawable->h;
   UNLOCK_HARDWARE(trident);
}
/* Return the width and height of the given buffer, read under the
 * hardware lock for a consistent snapshot.
 *
 * NOTE(review): LOCK_HARDWARE()/UNLOCK_HARDWARE() take no argument and
 * appear to expand using the local `smesa' -- confirm in the SiS lock
 * header before renaming that local.
 */
static void sisGetBufferSize( GLframebuffer *buffer,
			      GLuint *width, GLuint *height )
{
   GET_CURRENT_CONTEXT(ctx);
   sisContextPtr smesa = SIS_CONTEXT(ctx);

   LOCK_HARDWARE();
   *width = smesa->driDrawable->w;
   *height = smesa->driDrawable->h;
   UNLOCK_HARDWARE();
}
/**
 * Implement the hardware-specific portion of \c glFlush.
 *
 * Flushes any buffered vertex DMA under the hardware lock, then issues
 * a DRM_LOCK_FLUSH to push everything to the chip.
 *
 * \param ctx  Context to be flushed.
 *
 * \sa glFlush, mgaFinish, mgaFlushDMA
 */
static void mgaFlush( GLcontext *ctx )
{
   mgaContextPtr mga = MGA_CONTEXT( ctx );

   LOCK_HARDWARE( mga );

   if ( mga->vertex_dma_buffer != NULL ) {
      mgaFlushVerticesLocked( mga );
   }

   UPDATE_LOCK( mga, DRM_LOCK_FLUSH );
   UNLOCK_HARDWARE( mga );
}
/* Make sure all commands have been sent to the hardware and have
 * completed processing.
 *
 * With IRQs available we emit an interrupt and sleep on it; otherwise
 * we busy-wait for engine idle.
 */
void r200Finish( GLcontext *ctx )
{
   r200ContextPtr rmesa = R200_CONTEXT(ctx);

   r200Flush( ctx );

   if (!rmesa->do_irqs) {
      r200WaitForIdle( rmesa );
      return;
   }

   LOCK_HARDWARE( rmesa );
   r200EmitIrqLocked( rmesa );
   UNLOCK_HARDWARE( rmesa );

   r200WaitIrq( rmesa );
}
/* glBeginQuery hook.  For occlusion queries (GL_SAMPLES_PASSED_ARB),
 * drain the pipeline with grFinish() and zero Glide's pixel counter so
 * the query starts from a clean slate.  Other query targets are ignored.
 *
 * (The original had a dead `(void) q;' unused-parameter cast even
 * though q is dereferenced below; removed.)
 */
static void
tdfxBeginQuery(GLcontext *ctx, struct gl_query_object *q)
{
   tdfxContextPtr fxMesa = TDFX_CONTEXT(ctx);

   if (q->Target == GL_SAMPLES_PASSED_ARB) {
      LOCK_HARDWARE(fxMesa);
      fxMesa->Glide.grFinish();
      fxMesa->Glide.grReset(GR_STATS_PIXELS);
      UNLOCK_HARDWARE(fxMesa);
   }
}