/* Complete all rendering: flush queued commands, wait for the hardware
 * to go idle, then service any pending front-buffer rotation.
 */
void intelFinish( GLcontext *ctx )
{
   intelContextPtr intel = INTEL_CONTEXT( ctx );

   intelFlush( ctx );
   intelWaitForIdle( intel );
   intelCheckFrontRotate( ctx );
}
/**
 * glReadPixels entry point.
 *
 * Attempts the blit-based fast path first; if that cannot handle the
 * request, falls back to the software rasterizer.
 */
void
intelReadPixels(GLcontext * ctx,
                GLint x, GLint y, GLsizei width, GLsizei height,
                GLenum format, GLenum type,
                const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   if (INTEL_DEBUG & DEBUG_PIXEL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   intelFlush(ctx);
   intel_prepare_render(intel_context(ctx));

   if (do_blit_readpixels(ctx, x, y, width, height,
                          format, type, pack, pixels))
      return;

   if (INTEL_DEBUG & DEBUG_PIXEL)
      printf("%s: fallback to swrast\n", __FUNCTION__);

   /* The spans code requires the computed buffer states to be up to
    * date, but _swrast_ReadPixels only updates Mesa state after setting
    * up the spans code — so validate derived state here first.
    */
   if (ctx->NewState)
      _mesa_update_state(ctx);

   _swrast_ReadPixels(ctx, x, y, width, height, format, type, pack, pixels);
}
/* Take the hardware lock once up front so that individual span accesses
 * don't each pay locking overhead — this keeps span rendering performance
 * reasonable.
 */
void intelSpanRenderStart( GLcontext *ctx )
{
   intelContextPtr intel = INTEL_CONTEXT(ctx);

   intelFlush(&intel->ctx);
   LOCK_HARDWARE(intel);
   intelWaitForIdle(intel);
}
/* XXX: Do this for TexSubImage also:
 */
/**
 * Attempt a zero-CPU-copy texture upload straight out of a bound PBO
 * using the blitter.
 *
 * Returns GL_TRUE on success; GL_FALSE means the caller must take the
 * ordinary software upload path.
 */
static GLboolean
try_pbo_upload(struct intel_context *intel,
               struct intel_texture_image *intelImage,
               const struct gl_pixelstore_attrib *unpack,
               GLint internalFormat,
               GLint width, GLint height,
               GLenum format, GLenum type, const void *pixels)
{
   struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
   GLuint src_offset, src_stride;
   GLuint dst_offset, dst_stride;

   /* Bail out when no PBO is bound, pixel transfer ops are active, or
    * unpack skipping is requested — none of which the blit path handles.
    */
   if (!pbo ||
       intel->ctx._ImageTransferState ||
       unpack->SkipPixels || unpack->SkipRows) {
      _mesa_printf("%s: failure 1\n", __FUNCTION__);
      return GL_FALSE;
   }

   /* With a PBO bound, "pixels" is really a byte offset into the buffer
    * object.  Cast through a pointer-width integer before narrowing so the
    * conversion is well-defined on 64-bit builds (a direct pointer->GLuint
    * cast is a constraint violation / truncation hazard on LP64).
    */
   src_offset = (GLuint) (unsigned long) pixels;

   if (unpack->RowLength > 0)
      src_stride = unpack->RowLength;
   else
      src_stride = width;

   dst_offset = intel_miptree_image_offset(intelImage->mt,
                                           intelImage->face,
                                           intelImage->level);
   dst_stride = intelImage->mt->pitch;

   intelFlush(&intel->ctx);
   LOCK_HARDWARE(intel);
   {
      struct _DriBufferObject *src_buffer =
         intel_bufferobj_buffer(intel, pbo, INTEL_READ);
      struct _DriBufferObject *dst_buffer =
         intel_region_buffer(intel->intelScreen, intelImage->mt->region,
                             INTEL_WRITE_FULL);

      intelEmitCopyBlit(intel,
                        intelImage->mt->cpp,
                        src_stride, src_buffer, src_offset,
                        dst_stride, dst_buffer, dst_offset,
                        0, 0, 0, 0, width, height, GL_COPY);

      intel_batchbuffer_flush(intel->batch);
   }
   UNLOCK_HARDWARE(intel);

   return GL_TRUE;
}
/**
 * intelConfigureRx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void IntelMausi::intelConfigureRx(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    u32 rctl, rxcsum, ctrl_ext;

    /* disable receives while setting up the descriptors */
    /* NOTE(review): rctl is read here and written back unmodified at the
     * end, but E1000_RCTL_EN is never cleared before the settle delay —
     * confirm whether the "write rctl & ~E1000_RCTL_EN" step was
     * intentionally dropped (the upstream e1000e code performs it).
     */
    rctl = intelReadMem32(E1000_RCTL);
    intelFlush();
    usleep_range(10000, 20000);

    /* set the Receive Delay Timer Register */
    intelWriteMem32(E1000_RDTR, adapter->rx_int_delay);

    /* irq moderation: absolute Rx interrupt delay */
    intelWriteMem32(E1000_RADV, adapter->rx_abs_int_delay);

    /* Set interrupt throttle value. */
    intelWriteMem32(E1000_ITR, intrThrValue);

    /* Auto-Mask interrupts upon ICR access: IAM holds the mask applied
     * automatically when the interrupt cause register is read.
     */
    ctrl_ext = intelReadMem32(E1000_CTRL_EXT);
    ctrl_ext |= E1000_CTRL_EXT_IAME;
    intelWriteMem32(E1000_IAM, 0xffffffff);
    intelWriteMem32(E1000_CTRL_EXT, ctrl_ext);
    e1e_flush();

    /* Setup the HW Rx Head and Tail Descriptor Pointers and
     * the Base and Length of the Rx Descriptor Ring */
    intelInitRxRing();

    /* Enable Receive Checksum Offload for TCP and UDP */
    rxcsum = intelReadMem32(E1000_RXCSUM);
    rxcsum |= E1000_RXCSUM_TUOFL;
    intelWriteMem32(E1000_RXCSUM, rxcsum);

    /* With jumbo frames, excessive C-state transition latencies result
     * in dropped transactions.  On ICH parts, bump the Rx descriptor
     * write-back threshold instead (the pm_qos path is disabled here).
     */
    if (mtu > ETH_DATA_LEN) {
        //u32 lat = ((intelReadMem32(E1000_PBA) & E1000_PBA_RXA_MASK) * 1024 - adapter->max_frame_size) * 8 / 1000;

        if (adapter->flags & FLAG_IS_ICH) {
            u32 rxdctl = intelReadMem32(E1000_RXDCTL(0));

            intelWriteMem32(E1000_RXDCTL(0), rxdctl | 0x3);
        }
        //pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
    } else {
        //pm_qos_update_request(&adapter->netdev->pm_qos_req, PM_QOS_DEFAULT_VALUE);
    }
    /* Re-enable receives with the value captured at entry. */
    intelWriteMem32(E1000_RCTL, rctl);
}
/* Blit a user image (already resident in AGP space) to the current draw
 * region, honoring the drawable's cliprect list.  Flushes, takes the
 * hardware lock for the duration of the blits, then waits for completion.
 */
static void
do_draw_pix( GLcontext *ctx,
             GLint x, GLint y, GLsizei width, GLsizei height,
             GLint pitch, const void *pixels, GLuint dest )
{
   intelContextPtr intel = INTEL_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   drm_clip_rect_t *box = dPriv->pClipRects;
   int nbox = dPriv->numClipRects;
   int i;
   /* Source is addressed by its AGP aperture offset, not a CPU pointer. */
   int src_offset = intelAgpOffsetFromVirtual( intel, pixels);
   int src_pitch = pitch;

   assert(src_offset != ~0); /* should be caught earlier */

   if (INTEL_DEBUG & DEBUG_PIXEL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   intelFlush( &intel->ctx );
   LOCK_HARDWARE( intel );

   if (ctx->DrawBuffer)
   {
      y -= height; /* cope with pixel zoom */

      if (!clip_pixelrect(ctx, ctx->DrawBuffer, &x, &y, &width, &height)) {
         UNLOCK_HARDWARE( intel );
         return;
      }

      y = dPriv->h - y - height; /* convert from gl to hardware coords */
      x += dPriv->x;
      y += dPriv->y;

      /* One blit per cliprect intersecting the destination rectangle. */
      for (i = 0 ; i < nbox ; i++ )
      {
         GLint bx, by, bw, bh;
         if (intersect_region(box + i, x, y, width, height,
                              &bx, &by, &bw, &bh)) {
            intelEmitCopyBlitLocked( intel,
                                     intel->intelScreen->cpp,
                                     src_pitch, src_offset,
                                     intel->intelScreen->front.pitch,
                                     intel->drawRegion->offset,
                                     bx - x, by - y,
                                     bx, by,
                                     bw, bh );
         }
      }
   }
   UNLOCK_HARDWARE( intel );
   intelFinish( &intel->ctx );
}
/**
 * Called via glFramebufferRenderbufferEXT(): attach (or detach, when rb
 * is NULL) a renderbuffer to the given framebuffer attachment point.
 */
static void
intel_framebuffer_renderbuffer(GLcontext * ctx,
                               struct gl_framebuffer *fb,
                               GLenum attachment, struct gl_renderbuffer *rb)
{
   GLuint rb_name = rb ? rb->Name : 0;

   DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb_name);

   /* Finish any pending rendering before the attachment changes. */
   intelFlush(ctx);

   _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
   intel_draw_buffer(ctx, fb);
}
/* Insert a fence for a GL sync object: emit an MI_FLUSH, remember the
 * batchbuffer BO it landed in, and flush so the kernel sees the work.
 */
static void
intel_fence_sync(GLcontext *ctx, struct gl_sync_object *s,
                 GLenum condition, GLbitfield flags)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_sync_object *so = (struct intel_sync_object *)s;

   assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);

   intel_batchbuffer_emit_mi_flush(intel->batch);

   /* Hold a reference on the batch BO; waiting on it later tells us when
    * the commands ahead of the fence have completed.
    */
   so->bo = intel->batch->buf;
   drm_intel_bo_reference(so->bo);

   intelFlush(ctx);
}
/**
 * Called via glMapBufferARB(): map the whole buffer object and publish
 * the mapping through obj->Pointer/Length/Offset.
 */
static void *
intel_bufferobj_map(GLcontext * ctx,
                    GLenum target,
                    GLenum access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean want_write = (access != GL_READ_ONLY_ARB);

   assert(intel_obj);

   /* Buffers backed by plain system memory need no GEM mapping at all. */
   if (intel_obj->sys_buffer) {
      obj->Pointer = intel_obj->sys_buffer;
      obj->Length = obj->Size;
      obj->Offset = 0;
      return obj->Pointer;
   }

   /* Flush any existing batchbuffer that might reference this data. */
   if (drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
      intelFlush(ctx);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* Write-only maps can go through the GTT when the kernel supports
    * execution fencing; otherwise fall back to a CPU map.
    */
   if (access == GL_WRITE_ONLY_ARB && intel->intelScreen->kernel_exec_fencing) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   }
   else {
      drm_intel_bo_map(intel_obj->buffer, want_write);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual;
   obj->Length = obj->Size;
   obj->Offset = 0;

   return obj->Pointer;
}
/* glTexParameter handler for i830: update the hardware texture object to
 * match the new Mesa parameter and mark the unit for re-upload.
 */
static void i830TexParameter( GLcontext *ctx, GLenum target,
                              struct gl_texture_object *tObj,
                              GLenum pname, const GLfloat *params )
{
   i830TextureObjectPtr t = (i830TextureObjectPtr) tObj->DriverData;

   if (!t)
      return;

   if (pname == GL_TEXTURE_MIN_FILTER ||
       pname == GL_TEXTURE_MAG_FILTER ||
       pname == GL_TEXTURE_MAX_ANISOTROPY_EXT) {
      i830SetTexFilter( t, tObj->MinFilter, tObj->MagFilter,
                        tObj->MaxAnisotropy);
   }
   else if (pname == GL_TEXTURE_WRAP_S || pname == GL_TEXTURE_WRAP_T) {
      i830SetTexWrapping( t, tObj->WrapS, tObj->WrapT );
   }
   else if (pname == GL_TEXTURE_BORDER_COLOR) {
      i830SetTexBorderColor( t, tObj->_BorderChan );
   }
   else if (pname == GL_TEXTURE_BASE_LEVEL ||
            pname == GL_TEXTURE_MAX_LEVEL ||
            pname == GL_TEXTURE_MIN_LOD ||
            pname == GL_TEXTURE_MAX_LOD) {
      /* The i830 and its successors can do a lot of this without
       * reloading the textures.  A project for someone?
       */
      intelFlush( ctx );
      driSwapOutTextureObject( (driTextureObject *) t );
   }
   else {
      /* Unhandled parameter: nothing to re-upload. */
      return;
   }

   t->intel.dirty = I830_UPLOAD_TEX_ALL;
}
/* Enter or leave a software-rasterization fallback for the given bit.
 * The expensive swrast/tnl handoff only happens on the transitions
 * zero -> non-zero (enter) and exactly-this-bit -> zero (leave).
 */
void intelFallback( intelContextPtr intel, GLuint bit, GLboolean mode )
{
   GLcontext *ctx = &intel->ctx;
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   GLuint prev = intel->Fallback;

   if (mode) {
      intel->Fallback |= bit;

      if (prev == 0) {
         intelFlush(ctx);
         if (INTEL_DEBUG & DEBUG_FALLBACKS)
            fprintf(stderr, "ENTER FALLBACK 0x%x: %s\n",
                    bit, getFallbackString(bit));
         _swsetup_Wakeup( ctx );
         intel->RenderIndex = ~0;
      }
      return;
   }

   intel->Fallback &= ~bit;

   if (prev == bit) {
      /* Last fallback bit cleared: restore hardware rendering hooks. */
      _swrast_flush( ctx );
      if (INTEL_DEBUG & DEBUG_FALLBACKS)
         fprintf(stderr, "LEAVE FALLBACK 0x%x: %s\n",
                 bit, getFallbackString(bit));

      tnl->Driver.Render.Start = intelRenderStart;
      tnl->Driver.Render.PrimitiveNotify = intelRenderPrimitive;
      tnl->Driver.Render.Finish = intelRenderFinish;
      tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
      tnl->Driver.Render.CopyPV = _tnl_copy_pv;
      tnl->Driver.Render.Interp = _tnl_interp;

      _tnl_invalidate_vertex_state( ctx, ~0 );
      _tnl_invalidate_vertices( ctx, ~0 );
      _tnl_install_attrs( ctx,
                          intel->vertex_attrs,
                          intel->vertex_attr_count,
                          intel->ViewportMatrix.m, 0 );

      intel->NewGLState |= _INTEL_NEW_RENDERSTATE;
   }
}
/* glFinish: flush queued rendering, then block until the GPU is done
 * with every color buffer currently bound for drawing.
 */
void
intelFinish(GLcontext * ctx)
{
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   int buf;

   intelFlush(ctx);

   for (buf = 0; buf < fb->_NumColorDrawBuffers; buf++) {
      struct intel_renderbuffer *rb =
         intel_renderbuffer(fb->_ColorDrawBuffers[buf]);

      if (rb && rb->region)
         dri_bo_wait_rendering(rb->region->buffer);
   }

   if (fb->_DepthBuffer) {
      /* XXX: Wait on buffer idle */
   }
}
/* XXX: Thread safety? */ GLubyte * intel_region_map(struct intel_context *intel, struct intel_region *region) { intelFlush(&intel->ctx); _DBG("%s %p\n", __FUNCTION__, region); if (!region->map_refcount++) { if (region->pbo) intel_region_cow(intel, region); if (region->tiling != I915_TILING_NONE && intel->intelScreen->kernel_exec_fencing) drm_intel_gem_bo_map_gtt(region->buffer); else dri_bo_map(region->buffer, GL_TRUE); region->map = region->buffer->virtual; } return region->map; }
/**
 * intelDown - quiesce the device and optionally reset the hardware
 * @adapter: board private structure
 * @reset: boolean flag to reset the hardware or not
 */
void IntelMausi::intelDown(struct e1000_adapter *adapter, bool reset)
{
    struct e1000_hw *hw = &adapter->hw;
    UInt32 tctl, rctl;

    /* signal that we're down so the interrupt handler does not
     * reschedule our watchdog timer
     */
    set_bit(__E1000_DOWN, &adapter->state);

    /* disable receives in the hardware */
    rctl = intelReadMem32(E1000_RCTL);
    rctl &= ~E1000_RCTL_EN;
    intelWriteMem32(E1000_RCTL, rctl);
    /* flush and sleep below */

    /* disable transmits in the hardware */
    tctl = intelReadMem32(E1000_TCTL);
    tctl &= ~E1000_TCTL_EN;
    intelWriteMem32(E1000_TCTL, tctl);

    /* flush both disables and wait for them to finish */
    intelFlush();
    usleep_range(10000, 20000);

    /* With RX/TX stopped, it is safe to mask interrupts and tear down
     * the descriptor rings.
     */
    intelDisableIRQ();

    /* Capture final hardware counters before the rings go away. */
    updateStatistics(adapter);
    clearDescriptors();

    adapter->link_speed = 0;
    adapter->link_duplex = 0;

    /* Disable Si errata workaround on PCHx for jumbo frame flow */
    if ((hw->mac.type >= e1000_pch2lan) && (mtu > ETH_DATA_LEN) &&
        e1000_lv_jumbo_workaround_ich8lan(hw, false))
        DebugLog("Ethernet [IntelMausi]: failed to disable jumbo frame workaround mode\n");

    if (reset)
        intelReset(adapter);
}
/* glTexParameter handler for i915: nearly every parameter is handled at
 * state-emission time, so usually we only mark the texture unit dirty.
 */
static void i915TexParameter( GLcontext *ctx, GLenum target,
                              struct gl_texture_object *tObj,
                              GLenum pname, const GLfloat *params )
{
   i915TextureObjectPtr t = (i915TextureObjectPtr) tObj->DriverData;

   switch (pname) {
   case GL_TEXTURE_MIN_FILTER:
   case GL_TEXTURE_MAG_FILTER:
   case GL_TEXTURE_MAX_ANISOTROPY_EXT:
   case GL_TEXTURE_WRAP_S:
   case GL_TEXTURE_WRAP_T:
   case GL_TEXTURE_WRAP_R:
   case GL_TEXTURE_BORDER_COLOR:
   case GL_TEXTURE_COMPARE_MODE:
   case GL_TEXTURE_COMPARE_FUNC:
      /* Picked up when texture state is next emitted. */
      t->intel.dirty = I915_UPLOAD_TEX_ALL;
      break;

   case GL_TEXTURE_BASE_LEVEL:
   case GL_TEXTURE_MAX_LEVEL:
   case GL_TEXTURE_MIN_LOD:
   case GL_TEXTURE_MAX_LOD:
      /* The i915 and its successors can do a lot of this without
       * reloading the textures.  A project for someone?
       */
      intelFlush( ctx );
      driSwapOutTextureObject( (driTextureObject *) t );
      t->intel.dirty = I915_UPLOAD_TEX_ALL;
      break;

   default:
      return;
   }
}
/* Flip the front & back buffers.
 *
 * NOTE: the entire implementation is currently compiled out (#if 0);
 * this function is a no-op as built.
 */
void intelPageFlip( const __DRIdrawablePrivate *dPriv )
{
#if 0
   intelContextPtr intel;
   int tmp, ret;

   if (INTEL_DEBUG & DEBUG_IOCTL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   assert(dPriv);
   assert(dPriv->driContextPriv);
   assert(dPriv->driContextPriv->driverPrivate);

   intel = (intelContextPtr) dPriv->driContextPriv->driverPrivate;

   intelFlush( &intel->ctx );
   LOCK_HARDWARE( intel );

   if (dPriv->pClipRects) {
      *(drm_clip_rect_t *)intel->sarea->boxes = dPriv->pClipRects[0];
      intel->sarea->nbox = 1;
   }

   ret = drmCommandNone(intel->driFd, DRM_I830_FLIP);
   if (ret) {
      fprintf(stderr, "%s: %d\n", __FUNCTION__, ret);
      UNLOCK_HARDWARE( intel );
      exit(1);
   }

   tmp = intel->sarea->last_enqueue;
   intelRefillBatchLocked( intel );
   UNLOCK_HARDWARE( intel );

   intelSetDrawBuffer( &intel->ctx, intel->ctx.Color.DriverDrawBuffer );
#endif
}
/**
 * Called via glMapBufferRange().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(GLcontext * ctx,
                          GLenum target, GLintptr offset, GLsizeiptr length,
                          GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   /* System-memory-backed buffer: return a pointer into it directly. */
   if (intel_obj->sys_buffer) {
      obj->Pointer = intel_obj->sys_buffer + offset;
      return obj->Pointer;
   }

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If the mapping is synchronized with other GL operations, flush
    * the batchbuffer so that GEM knows about the buffer access for later
    * syncing.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
      intelFlush(ctx);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = dri_bo_alloc(intel->bufmgr, "bufferobj",
                                       intel_obj->Base.Size, 64);
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
         /* Explicit-flush path: stage writes in plain malloc'd memory. */
         intel_obj->range_map_buffer = _mesa_malloc(length);
         obj->Pointer = intel_obj->range_map_buffer;
      } else {
         /* Otherwise stage them in a temporary BO, blitted in at unmap. */
         intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
                                                      "range map",
                                                      length, 64);
         if (!(access & GL_MAP_READ_BIT) &&
             intel->intelScreen->kernel_exec_fencing) {
            drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
            intel_obj->mapped_gtt = GL_TRUE;
         } else {
            drm_intel_bo_map(intel_obj->range_map_bo,
                             (access & GL_MAP_WRITE_BIT) != 0);
            intel_obj->mapped_gtt = GL_FALSE;
         }
         obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   /* Common case: map the real BO, through the GTT for write-only maps
    * when the kernel supports execution fencing.
    */
   if (!(access & GL_MAP_READ_BIT) &&
       intel->intelScreen->kernel_exec_fencing) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}
/**
 * Use blitting to clear the renderbuffers named by 'flags'.
 * Note: we can't use the ctx->DrawBuffer->_ColorDrawBufferMask field
 * since that might include software renderbuffers or renderbuffers
 * which we're clearing with triangles.
 * \param mask  bitmask of BUFFER_BIT_* values indicating buffers to clear
 * \param all   if true, clear the whole buffer (ignore cx/cy/cw/ch)
 * \param cx,cy,cw,ch  scissored clear rectangle in GL window coords
 */
void
intelClearWithBlit(GLcontext *ctx, GLbitfield mask, GLboolean all,
                   GLint cx, GLint cy, GLint cw, GLint ch)
{
   struct intel_context *intel = intel_context( ctx );
   GLuint clear_depth;
   GLbitfield skipBuffers = 0;
   BATCH_LOCALS;

   if (INTEL_DEBUG & DEBUG_DRI)
      _mesa_printf("%s %x\n", __FUNCTION__, mask);

   /*
    * Compute values for clearing the buffers.  Depth and stencil share a
    * packed 24/8 value: depth in the low 24 bits, stencil in the top 8.
    */
   clear_depth = 0;
   if (mask & BUFFER_BIT_DEPTH) {
      clear_depth = (GLuint) (ctx->DrawBuffer->_DepthMax * ctx->Depth.Clear);
   }
   if (mask & BUFFER_BIT_STENCIL) {
      clear_depth |= (ctx->Stencil.Clear & 0xff) << 24;
   }

   /* If clearing both depth and stencil, skip BUFFER_BIT_STENCIL in
    * the loop below (one blit clears both planes).
    */
   if ((mask & BUFFER_BIT_DEPTH) && (mask & BUFFER_BIT_STENCIL)) {
      skipBuffers = BUFFER_BIT_STENCIL;
   }

   /* XXX Move this flush/lock into the following conditional?
    */
   intelFlush( &intel->ctx );
   LOCK_HARDWARE( intel );

   if (intel->numClipRects) {
      drm_clip_rect_t clear;
      int i;

      if (intel->ctx.DrawBuffer->Name == 0) {
         /* clearing a window: flip top to bottom into screen coords */
         clear.x1 = cx + intel->drawX;
         clear.y1 = intel->driDrawable->y + intel->driDrawable->h - cy - ch;
         clear.x2 = clear.x1 + cw;
         clear.y2 = clear.y1 + ch;

         /* adjust for page flipping */
         if ( intel->sarea->pf_current_page == 1 ) {
            const GLuint tmp = mask;
            mask &= ~(BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT);
            if ( tmp & BUFFER_BIT_FRONT_LEFT ) mask |= BUFFER_BIT_BACK_LEFT;
            if ( tmp & BUFFER_BIT_BACK_LEFT )  mask |= BUFFER_BIT_FRONT_LEFT;
         }
      }
      else {
         /* clearing FBO */
         ASSERT(intel->numClipRects == 1);
         ASSERT(intel->pClipRects == &intel->fboRect);
         clear.x1 = cx;
         clear.y1 = intel->ctx.DrawBuffer->Height - cy - ch;
         /* Bug fix: x2 was previously computed as clear.y1 + cw, which
          * produced a bogus clear rectangle for FBOs; the right edge must
          * extend from the left edge (x1), not from y1.
          */
         clear.x2 = clear.x1 + cw;
         clear.y2 = clear.y1 + ch;
         /* no change to mask */
      }

      for (i = 0 ; i < intel->numClipRects ; i++) {
         const drm_clip_rect_t *box = &intel->pClipRects[i];
         drm_clip_rect_t b;
         GLuint buf;
         GLuint clearMask = mask;      /* use copy, since we modify it below */

         if (!all) {
            intel_intersect_cliprects(&b, &clear, box);
         }
         else {
            b = *box;
         }

         if (0)
            _mesa_printf("clear %d,%d..%d,%d, mask %x\n",
                         b.x1, b.y1, b.x2, b.y2, mask);

         /* Loop over all renderbuffers */
         for (buf = 0; buf < BUFFER_COUNT && clearMask; buf++) {
            const GLbitfield bufBit = 1 << buf;
            if ((clearMask & bufBit) && !(bufBit & skipBuffers)) {
               /* OK, clear this renderbuffer */
               const struct intel_renderbuffer *irb
                  = intel_renderbuffer(ctx->DrawBuffer->
                                       Attachment[buf].Renderbuffer);
               GLuint clearVal;
               GLint pitch, cpp;
               GLuint BR13, CMD;

               ASSERT(irb);
               ASSERT(irb->region);

               pitch = irb->region->pitch;
               cpp = irb->region->cpp;

               /* Setup the blit command */
               if (cpp == 4) {
                  BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24) | (1<<25);
                  if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
                     /* Write only the planes being cleared so an
                      * unpaired depth or stencil clear leaves the other
                      * plane intact.
                      */
                     CMD = XY_COLOR_BLT_CMD;
                     if (clearMask & BUFFER_BIT_DEPTH)
                        CMD |= XY_COLOR_BLT_WRITE_RGB;
                     if (clearMask & BUFFER_BIT_STENCIL)
                        CMD |= XY_COLOR_BLT_WRITE_ALPHA;
                  }
                  else {
                     /* clearing RGBA */
                     CMD = (XY_COLOR_BLT_CMD |
                            XY_COLOR_BLT_WRITE_ALPHA |
                            XY_COLOR_BLT_WRITE_RGB);
                  }
               }
               else {
                  ASSERT(cpp == 2 || cpp == 0);
                  BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
                  CMD = XY_COLOR_BLT_CMD;
               }

               if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
                  clearVal = clear_depth;
               }
               else {
                  clearVal = (cpp == 4)
                     ? intel->ClearColor8888 : intel->ClearColor565;
               }
               /*
                 _mesa_debug(ctx, "hardware blit clear buf %d rb id %d\n",
                 buf, irb->Base.Name);
               */

               BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
               OUT_BATCH( CMD );
               OUT_BATCH( BR13 );
               OUT_BATCH( (b.y1 << 16) | b.x1 );
               OUT_BATCH( (b.y2 << 16) | b.x2 );
               OUT_RELOC( irb->region->buffer, DRM_MM_TT|DRM_MM_WRITE,
                          irb->region->draw_offset );
               OUT_BATCH( clearVal );
               ADVANCE_BATCH();
               clearMask &= ~bufBit;    /* turn off bit, for faster loop exit */
            }
         }
      }
      intel_batchbuffer_flush( intel->batch );
   }

   UNLOCK_HARDWARE( intel );
}
/* Blit a user image (already resident in AGP space) to the current draw
 * buffer, clipping against the drawable's cliprect list by hand.
 */
static void
do_draw_pix( GLcontext *ctx,
             GLint x, GLint y, GLsizei width, GLsizei height,
             GLint pitch, const void *pixels, GLuint dest )
{
   intelContextPtr intel = INTEL_CONTEXT(ctx);
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   drm_clip_rect_t *box = dPriv->pClipRects;
   int nbox = dPriv->numClipRects;
   int i;
   int size;
   /* Source is addressed by its AGP aperture offset, not a CPU pointer. */
   int src_offset = intelAgpOffsetFromVirtual( intel, pixels);
   int src_pitch = pitch;

   if (INTEL_DEBUG & DEBUG_PIXEL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   intelFlush( &intel->ctx );
   LOCK_HARDWARE( intel );

   if (ctx->DrawBuffer)
   {
      y -= height; /* cope with pixel zoom */

      if (!clip_pixelrect(ctx, ctx->DrawBuffer,
                          &x, &y, &width, &height, &size)) {
         UNLOCK_HARDWARE( intel );
         return;
      }

      y = dPriv->h - y - height; /* convert from gl to hardware coords */
      x += dPriv->x;
      y += dPriv->y;

      for (i = 0 ; i < nbox ; i++ )
      {
         /* Intersect this cliprect with the destination rectangle;
          * skip the blit if the intersection is empty.
          */
         GLint bx = box[i].x1;
         GLint by = box[i].y1;
         GLint bw = box[i].x2 - bx;
         GLint bh = box[i].y2 - by;

         if (bx < x) bw -= x - bx, bx = x;
         if (by < y) bh -= y - by, by = y;
         if (bx + bw > x + width) bw = x + width - bx;
         if (by + bh > y + height) bh = y + height - by;
         if (bw <= 0) continue;
         if (bh <= 0) continue;

         intelEmitCopyBlitLocked( intel,
                                  intel->intelScreen->cpp,
                                  src_pitch, src_offset,
                                  intel->intelScreen->frontPitch,
                                  intel->drawOffset,
                                  bx - x, by - y,
                                  bx, by,
                                  bw, bh );
      }
   }
   UNLOCK_HARDWARE( intel );
   intelFinish( &intel->ctx );
}
/* Mask every interrupt source, then flush so the write reaches the
 * hardware before we return.
 */
void IntelMausi::intelDisableIRQ()
{
    intelWriteMem32(E1000_IMC, 0xFFFFFFFF);
    intelFlush();
}
/* Core glTexImage1D/2D/3D (and compressed) upload path: choose a format,
 * (re)allocate miptree storage, try the PBO fast paths, then map the
 * destination and copy the user data in.
 */
static void
intelTexImage(GLcontext * ctx,
              GLint dims,
              GLenum target, GLint level,
              GLint internalFormat,
              GLint width, GLint height, GLint depth,
              GLint border,
              GLenum format, GLenum type, const void *pixels,
              const struct gl_pixelstore_attrib *unpack,
              struct gl_texture_object *texObj,
              struct gl_texture_image *texImage,
              GLsizei imageSize, int compressed)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_texture_object *intelObj = intel_texture_object(texObj);
   struct intel_texture_image *intelImage = intel_texture_image(texImage);
   GLint postConvWidth = width;
   GLint postConvHeight = height;
   GLint texelBytes, sizeInBytes;
   GLuint dstRowStride;

   DBG("%s target %s level %d %dx%dx%d border %d\n", __FUNCTION__,
       _mesa_lookup_enum_by_nr(target), level, width, height, depth, border);

   intelFlush(ctx);

   intelImage->face = target_to_face(target);
   intelImage->level = level;

   if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
      _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
                                         &postConvHeight);
   }

   /* choose the texture format */
   texImage->TexFormat = intelChooseTextureFormat(ctx, internalFormat,
                                                  format, type);

   _mesa_set_fetch_functions(texImage, dims);

   if (texImage->TexFormat->TexelBytes == 0) {
      /* must be a compressed format */
      texelBytes = 0;
      texImage->IsCompressed = GL_TRUE;
      texImage->CompressedSize =
         ctx->Driver.CompressedTextureSize(ctx, texImage->Width,
                                           texImage->Height, texImage->Depth,
                                           texImage->TexFormat->MesaFormat);
   } else {
      texelBytes = texImage->TexFormat->TexelBytes;

      /* Minimum pitch of 32 bytes */
      if (postConvWidth * texelBytes < 32) {
         postConvWidth = 32 / texelBytes;
         texImage->RowStride = postConvWidth;
      }

      assert(texImage->RowStride == postConvWidth);
   }

   /* Release the reference to a potentially orphaned buffer.
    * Release any old malloced memory.
    */
   if (intelImage->mt) {
      intel_miptree_release(intel, &intelImage->mt);
      assert(!texImage->Data);
   }
   else if (texImage->Data) {
      /* NOTE(review): this frees with _mesa_align_free, but the local-
       * memory path below allocates with plain malloc — confirm these
       * allocators actually pair up (upstream uses matched align
       * alloc/free helpers here).
       */
      _mesa_align_free(texImage->Data);
   }

   /* If this is the only texture image in the tree, could call
    * bmBufferData with NULL data to free the old block and avoid
    * waiting on any outstanding fences.
    */
   if (intelObj->mt &&
       intelObj->mt->first_level == level &&
       intelObj->mt->last_level == level &&
       intelObj->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
       !intel_miptree_match_image(intelObj->mt, &intelImage->base,
                                  intelImage->face, intelImage->level)) {

      DBG("release it\n");
      intel_miptree_release(intel, &intelObj->mt);
      assert(!intelObj->mt);
   }

   if (!intelObj->mt) {
      guess_and_alloc_mipmap_tree(intel, intelObj, intelImage);
      if (!intelObj->mt) {
         DBG("guess_and_alloc_mipmap_tree: failed\n");
      }
   }

   assert(!intelImage->mt);

   if (intelObj->mt &&
       intel_miptree_match_image(intelObj->mt, &intelImage->base,
                                 intelImage->face, intelImage->level)) {

      intel_miptree_reference(&intelImage->mt, intelObj->mt);
      assert(intelImage->mt);
   }

   if (!intelImage->mt)
      DBG("XXX: Image did not fit into tree - storing in local memory!\n");

   /* PBO fastpaths:
    */
   if (dims <= 2 &&
       intelImage->mt &&
       intel_buffer_object(unpack->BufferObj) &&
       check_pbo_format(internalFormat, format,
                        type, intelImage->base.TexFormat)) {

      DBG("trying pbo upload\n");

      /* Attempt to texture directly from PBO data (zero copy upload).
       *
       * Currently disable as it can lead to worse as well as better
       * performance (in particular when intel_region_cow() is
       * required).
       */
      if (intelObj->mt == intelImage->mt &&
          intelObj->mt->first_level == level &&
          intelObj->mt->last_level == level) {

         if (try_pbo_zcopy(intel, intelImage, unpack,
                           internalFormat,
                           width, height, format, type, pixels)) {
            DBG("pbo zcopy upload succeeded\n");
            return;
         }
      }

      /* Otherwise, attempt to use the blitter for PBO image uploads.
       */
      if (try_pbo_upload(intel, intelImage, unpack,
                         internalFormat,
                         width, height, format, type, pixels)) {
         DBG("pbo upload succeeded\n");
         return;
      }

      DBG("pbo upload failed\n");
   }

   /* intelCopyTexImage calls this function with pixels == NULL, with
    * the expectation that the mipmap tree will be set up but nothing
    * more will be done.  This is where those calls return:
    */
   if (compressed) {
      pixels = _mesa_validate_pbo_compressed_teximage(ctx, imageSize, pixels,
                                                      unpack,
                                                      "glCompressedTexImage");
   } else {
      pixels = _mesa_validate_pbo_teximage(ctx, dims, width, height, 1,
                                           format, type,
                                           pixels, unpack, "glTexImage");
   }
   if (!pixels)
      return;

   if (intelImage->mt)
      intel_region_idle(intel->intelScreen, intelImage->mt->region);

   LOCK_HARDWARE(intel);

   if (intelImage->mt) {
      /* Map the miptree region the image lives in. */
      texImage->Data = intel_miptree_image_map(intel,
                                               intelImage->mt,
                                               intelImage->face,
                                               intelImage->level,
                                               &dstRowStride,
                                               intelImage->base.ImageOffsets);
   }
   else {
      /* Allocate regular memory and store the image there temporarily.   */
      if (texImage->IsCompressed) {
         sizeInBytes = texImage->CompressedSize;
         dstRowStride =
            _mesa_compressed_row_stride(texImage->TexFormat->MesaFormat, width);
         assert(dims != 3);
      }
      else {
         dstRowStride = postConvWidth * texelBytes;
         sizeInBytes = depth * dstRowStride * postConvHeight;
      }

      texImage->Data = malloc(sizeInBytes);
   }

   DBG("Upload image %dx%dx%d row_len %x "
       "pitch %x\n",
       width, height, depth, width * texelBytes, dstRowStride);

   /* Copy data.  Would like to know when it's ok for us to eg. use
    * the blitter to copy.  Or, use the hardware to do the format
    * conversion and copy:
    */
   if (compressed) {
      memcpy(texImage->Data, pixels, imageSize);
   }
   else if (!texImage->TexFormat->StoreImage(ctx, dims,
                                             texImage->_BaseFormat,
                                             texImage->TexFormat,
                                             texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
                                             dstRowStride,
                                             texImage->ImageOffsets,
                                             width, height, depth,
                                             format, type, pixels, unpack)) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
   }

   _mesa_unmap_teximage_pbo(ctx, unpack);

   if (intelImage->mt) {
      intel_miptree_image_unmap(intel, intelImage->mt);
      texImage->Data = NULL;
   }

   UNLOCK_HARDWARE(intel);

#if 0
   /* GL_SGIS_generate_mipmap -- this can be accelerated now.
    */
   if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
      intel_generate_mipmap(ctx, target,
                            &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
                            texObj);
   }
#endif
}
/**
 * Copy the window contents named by dPriv to the rotated (or reflected)
 * color buffer.
 * srcBuf is BUFFER_BIT_FRONT_LEFT or BUFFER_BIT_BACK_LEFT to indicate the source.
 */
void
i830RotateWindow(intelContextPtr intel, __DRIdrawablePrivate *dPriv,
                 GLuint srcBuf)
{
   i830ContextPtr i830 = I830_CONTEXT( intel );
   intelScreenPrivate *screen = intel->intelScreen;
   const GLuint cpp = screen->cpp;
   drm_clip_rect_t fullRect;
   GLuint textureFormat, srcOffset, srcPitch;
   const drm_clip_rect_t *clipRects;
   int numClipRects;
   int i;
   int xOrig, yOrig;
   int origNumClipRects;
   drm_clip_rect_t *origRects;

   /*
    * set up hardware state: switch to the meta (internal rendering)
    * state set and configure a plain textured-quad pipeline.
    */
   intelFlush( &intel->ctx );

   SET_STATE( i830, meta );
   set_initial_state( i830 );
   set_no_texture( i830 );
   set_vertex_format( i830 );
   set_no_depth_stencil_write( i830 );
   set_color_mask( i830, GL_FALSE );

   LOCK_HARDWARE(intel);

   /* save current drawing origin and cliprects (restored at end) */
   xOrig = intel->drawX;
   yOrig = intel->drawY;
   origNumClipRects = intel->numClipRects;
   origRects = intel->pClipRects;

   if (!intel->numClipRects)
      goto done;

   /*
    * set drawing origin, cliprects for full-screen access to rotated screen
    */
   fullRect.x1 = 0;
   fullRect.y1 = 0;
   fullRect.x2 = screen->rotatedWidth;
   fullRect.y2 = screen->rotatedHeight;
   intel->drawX = 0;
   intel->drawY = 0;
   intel->numClipRects = 1;
   intel->pClipRects = &fullRect;

   set_draw_region( i830, &screen->rotated );

   if (cpp == 4)
      textureFormat = MAPSURF_32BIT | MT_32BIT_ARGB8888;
   else
      textureFormat = MAPSURF_16BIT | MT_16BIT_RGB565;

   /* Pick the source color buffer and its cliprect list. */
   if (srcBuf == BUFFER_BIT_FRONT_LEFT) {
      srcPitch = screen->front.pitch;   /* in bytes */
      srcOffset = screen->front.offset; /* bytes */
      clipRects = dPriv->pClipRects;
      numClipRects = dPriv->numClipRects;
   }
   else {
      srcPitch = screen->back.pitch;   /* in bytes */
      srcOffset = screen->back.offset; /* bytes */
      clipRects = dPriv->pBackClipRects;
      numClipRects = dPriv->numBackClipRects;
   }

   /* set the whole screen up as a texture to avoid alignment issues */
   set_tex_rect_source(i830,
                       srcOffset,
                       screen->width,
                       screen->height,
                       srcPitch, textureFormat);

   enable_texture_blend_replace(i830);

   /*
    * loop over the source window's cliprects
    */
   for (i = 0; i < numClipRects; i++) {
      int srcX0 = clipRects[i].x1;
      int srcY0 = clipRects[i].y1;
      int srcX1 = clipRects[i].x2;
      int srcY1 = clipRects[i].y2;
      GLfloat verts[4][2], tex[4][2];
      int j;

      /* build vertices for four corners of clip rect */
      verts[0][0] = srcX0;  verts[0][1] = srcY0;
      verts[1][0] = srcX1;  verts[1][1] = srcY0;
      verts[2][0] = srcX1;  verts[2][1] = srcY1;
      verts[3][0] = srcX0;  verts[3][1] = srcY1;

      /* .. and texcoords */
      tex[0][0] = srcX0;  tex[0][1] = srcY0;
      tex[1][0] = srcX1;  tex[1][1] = srcY0;
      tex[2][0] = srcX1;  tex[2][1] = srcY1;
      tex[3][0] = srcX0;  tex[3][1] = srcY1;

      /* transform coords to rotated screen coords */
      for (j = 0; j < 4; j++) {
         matrix23TransformCoordf(&screen->rotMatrix,
                                 &verts[j][0], &verts[j][1]);
      }

      /* draw polygon to map source image to dest region */
      draw_poly(i830, 255, 255, 255, 255, 4, verts, tex);

   } /* cliprect loop */

   assert(!intel->prim.flush);
   intelFlushBatchLocked( intel, GL_FALSE, GL_FALSE, GL_FALSE );

 done:
   /* restore original drawing origin and cliprects */
   intel->drawX = xOrig;
   intel->drawY = yOrig;
   intel->numClipRects = origNumClipRects;
   intel->pClipRects = origRects;

   UNLOCK_HARDWARE(intel);

   /* Switch back from the meta state set to the regular GL state. */
   SET_STATE( i830, state );
}
/* Attempt to accelerate glDrawPixels by texturing directly from the user
 * image (which must already be in AGP memory) and drawing a screen-aligned
 * quad.  Returns GL_FALSE to request the software fallback; GL_TRUE means
 * the operation was fully handled (including the clipped-away case).
 */
GLboolean
i830TryTextureDrawPixels( GLcontext *ctx,
                          GLint x, GLint y, GLsizei width, GLsizei height,
                          GLenum format, GLenum type,
                          const struct gl_pixelstore_attrib *unpack,
                          const GLvoid *pixels )
{
   intelContextPtr intel = INTEL_CONTEXT(ctx);
   i830ContextPtr i830 = I830_CONTEXT(ctx);
   GLint pitch = unpack->RowLength ? unpack->RowLength : width;
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   int textureFormat;
   GLenum glTextureFormat;
   int dst_offset = i830->meta.Buffer[I830_DESTREG_CBUFADDR2];
   int src_offset = intelAgpOffsetFromVirtual( intel, pixels );

   if (INTEL_DEBUG & DEBUG_PIXEL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   /* Todo -- upload images that aren't in agp space, then texture
    * from them.
    */
   if ( !intelIsAgpMemory( intel, pixels, pitch*height ) ) {
      fprintf(stderr, "%s: intelIsAgpMemory failed\n", __FUNCTION__);
      return GL_FALSE;
   }

   /* Todo -- don't want to clobber all the drawing state like we do
    * for readpixels -- most of this state can be handled just fine.
    */
   if ( ctx->_ImageTransferState ||
        unpack->SwapBytes ||
        unpack->LsbFirst ||
        ctx->Color.AlphaEnabled ||
        ctx->Depth.Test ||
        ctx->Fog.Enabled ||
        ctx->Scissor.Enabled ||
        ctx->Stencil.Enabled ||
        !ctx->Color.ColorMask[0] ||
        !ctx->Color.ColorMask[1] ||
        !ctx->Color.ColorMask[2] ||
        !ctx->Color.ColorMask[3] ||
        ctx->Color.ColorLogicOpEnabled ||
        ctx->Texture._EnabledUnits ||
        ctx->Depth.OcclusionTest) {
      fprintf(stderr, "%s: other tests failed\n", __FUNCTION__);
      return GL_FALSE;
   }

   /* Todo -- remove these restrictions:
    */
   if (ctx->Pixel.ZoomX != 1.0F ||
       ctx->Pixel.ZoomY != -1.0F)
      return GL_FALSE;

   /* Translate the (format, type) pair into a hardware surface format. */
   switch (type) {
   case GL_UNSIGNED_SHORT_1_5_5_5_REV:
      if (format != GL_BGRA) return GL_FALSE;
      textureFormat = MAPSURF_16BIT | MT_16BIT_ARGB1555;
      glTextureFormat = GL_RGBA;
      break;
   case GL_UNSIGNED_SHORT_5_6_5:
      if (format != GL_RGB) return GL_FALSE;
      textureFormat = MAPSURF_16BIT | MT_16BIT_RGB565;
      glTextureFormat = GL_RGB;
      break;
   case GL_UNSIGNED_SHORT_8_8_MESA:
      if (format != GL_YCBCR_MESA) return GL_FALSE;
      textureFormat = (MAPSURF_422 | MT_422_YCRCB_SWAPY
                       /* | TM0S1_COLORSPACE_CONVERSION */
         );
      glTextureFormat = GL_YCBCR_MESA;
      break;
   case GL_UNSIGNED_SHORT_8_8_REV_MESA:
      if (format != GL_YCBCR_MESA) return GL_FALSE;
      textureFormat = (MAPSURF_422 | MT_422_YCRCB_NORMAL
                       /* | TM0S1_COLORSPACE_CONVERSION */
         );
      glTextureFormat = GL_YCBCR_MESA;
      break;
   case GL_UNSIGNED_INT_8_8_8_8_REV:
      if (format != GL_BGRA) return GL_FALSE;
      textureFormat = MAPSURF_32BIT | MT_32BIT_ARGB8888;
      glTextureFormat = GL_RGBA;
      break;
   default:
      fprintf(stderr, "%s: destFormat failed\n", __FUNCTION__);
      return GL_FALSE;
   }

   intelFlush( ctx );
   /* Switch to the internal "meta" state set for the textured-quad draw. */
   SET_STATE( i830, meta );

   LOCK_HARDWARE( intel );
   {
      intelWaitForIdle( intel ); /* required by GL */

      y -= height; /* cope with pixel zoom */

      if (!driClipRectToFramebuffer(ctx->ReadBuffer, &x, &y,
                                    &width, &height)) {
         UNLOCK_HARDWARE( intel );
         SET_STATE(i830, state);
         fprintf(stderr, "%s: cliprect failed\n", __FUNCTION__);
         /* Fully clipped away still counts as handled. */
         return GL_TRUE;
      }

      y = dPriv->h - y - height;

      set_initial_state( i830 );

      /* Set the pixel image up as a rectangular texture.
       */
      set_tex_rect_source( i830,
                           src_offset,
                           width,
                           height,
                           pitch, /* XXXX!!!! -- /2 sometimes */
                           textureFormat );

      enable_texture_blend_replace( i830 );

      /* Draw to the current draw buffer:
       */
      set_draw_offset( i830, dst_offset );

      /* Draw a quad, use regular cliprects
       */
/*       fprintf(stderr, "x: %d y: %d width %d height %d\n", x, y, width, height); */

      draw_quad( i830,
                 x, x+width, y, y+height,
                 0, 255, 0, 0,
                 0, width, 0, height );

      intelWindowMoved( intel );
   }
   UNLOCK_HARDWARE( intel );
   intelFinish( ctx ); /* required by GL */

   /* Restore the regular GL state set. */
   SET_STATE(i830, state);

   return GL_TRUE;
}
/* Accelerate glReadPixels by texturing from the framebuffer and rendering
 * a quad into the caller's AGP-resident destination buffer, using the
 * i830 meta-state mechanism.
 *
 * Returns GL_TRUE if the read was handled here (including the fully
 * clipped case), GL_FALSE to fall back to software.
 */
GLboolean i830TryTextureReadPixels( GLcontext *ctx,
				    GLint x, GLint y, GLsizei width, GLsizei height,
				    GLenum format, GLenum type,
				    const struct gl_pixelstore_attrib *pack,
				    GLvoid *pixels )
{
   i830ContextPtr i830 = I830_CONTEXT(ctx);
   intelContextPtr intel = INTEL_CONTEXT(ctx);
   intelScreenPrivate *screen = i830->intel.intelScreen;
   GLint pitch = pack->RowLength ? pack->RowLength : width; /* in pixels */
   __DRIdrawablePrivate *dPriv = i830->intel.driDrawable;
   int textureFormat;
   GLenum glTextureFormat;
   /* Source: the meta-state's current color buffer address. */
   int src_offset = i830->meta.Buffer[I830_DESTREG_CBUFADDR2];
   /* Destination: card-relative offset of the user's AGP buffer. */
   int destOffset = intelAgpOffsetFromVirtual( &i830->intel, pixels);
   int destFormat, depthFormat, destPitch;
   drm_clip_rect_t tmp;

   if (INTEL_DEBUG & DEBUG_PIXEL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   /* Note: GL_PACK_INVERT_MESA must be set (the rendered result comes
    * out upside-down otherwise).
    */
   if ( ctx->_ImageTransferState ||
	pack->SwapBytes ||
	pack->LsbFirst ||
	!pack->Invert) {
      fprintf(stderr, "%s: check_color failed\n", __FUNCTION__);
      return GL_FALSE;
   }

   /* Pick the sampler format matching the screen's framebuffer layout. */
   switch (screen->fbFormat) {
   case DV_PF_565:
      textureFormat = MAPSURF_16BIT | MT_16BIT_RGB565;
      glTextureFormat = GL_RGB;
      break;
   case DV_PF_555:
      textureFormat = MAPSURF_16BIT | MT_16BIT_ARGB1555;
      glTextureFormat = GL_RGBA;
      break;
   case DV_PF_8888:
      textureFormat = MAPSURF_32BIT | MT_32BIT_ARGB8888;
      glTextureFormat = GL_RGBA;
      break;
   default:
      fprintf(stderr, "%s: textureFormat failed %x\n", __FUNCTION__,
	      screen->fbFormat);
      return GL_FALSE;
   }

   /* Pick the render-target format for the user's (format, type) pair. */
   switch (type) {
   case GL_UNSIGNED_SHORT_5_6_5:
      if (format != GL_RGB) return GL_FALSE;
      destFormat = COLR_BUF_RGB565;
      depthFormat = DEPTH_FRMT_16_FIXED;
      destPitch = pitch * 2;   /* bytes per row at 2 bpp */
      break;
   case GL_UNSIGNED_INT_8_8_8_8_REV:
      if (format != GL_BGRA) return GL_FALSE;
      destFormat = COLR_BUF_ARGB8888;
      depthFormat = DEPTH_FRMT_24_FIXED_8_OTHER;
      destPitch = pitch * 4;   /* bytes per row at 4 bpp */
      break;
   default:
      fprintf(stderr, "%s: destFormat failed %s\n", __FUNCTION__,
	      _mesa_lookup_enum_by_nr(type));
      return GL_FALSE;
   }

   destFormat |= (0x02<<24);

/*    fprintf(stderr, "type: %s destFormat: %x\n", */
/* 	   _mesa_lookup_enum_by_nr(type), */
/* 	   destFormat); */

   intelFlush( ctx );
   SET_STATE( i830, meta );   /* switch to the meta-ops state block */
   set_initial_state( i830 );
   set_no_depth_stencil_write( i830 );

   LOCK_HARDWARE( intel );
   {
      intelWaitForIdle( intel ); /* required by GL */

      if (!driClipRectToFramebuffer(ctx->ReadBuffer, &x, &y, &width, &height)) {
	 UNLOCK_HARDWARE( intel );
	 SET_STATE(i830, state);
	 fprintf(stderr, "%s: cliprect failed\n", __FUNCTION__);
	 /* Fully clipped: nothing to read, but the call was handled. */
	 return GL_TRUE;
      }

#if 0
      /* FIXME -- Just emit the correct state
       */
      if (i830SetParam(i830->driFd, I830_SETPARAM_CBUFFER_PITCH,
		       destPitch) != 0) {
	 UNLOCK_HARDWARE( intel );
	 SET_STATE(i830, state);
	 fprintf(stderr, "%s: setparam failed\n", __FUNCTION__);
	 return GL_FALSE;
      }
#endif

      /* Convert to screen coords (y flipped, drawable origin applied). */
      y = dPriv->h - y - height;
      x += dPriv->x;
      y += dPriv->y;

      /* Set the frontbuffer up as a large rectangular texture.
       */
      set_tex_rect_source( i830, src_offset, screen->width, screen->height,
			   screen->front.pitch, textureFormat );

      enable_texture_blend_replace( i830 );

      /* Set the 3d engine to draw into the agp memory
       */
      set_draw_region( i830, destOffset );
      set_draw_format( i830, destFormat, depthFormat );

      /* Draw a single quad, no cliprects: temporarily point the context
       * at one fake cliprect covering the destination rectangle.
       */
      i830->intel.numClipRects = 1;
      i830->intel.pClipRects = &tmp;
      i830->intel.pClipRects[0].x1 = 0;
      i830->intel.pClipRects[0].y1 = 0;
      i830->intel.pClipRects[0].x2 = width;
      i830->intel.pClipRects[0].y2 = height;

      draw_quad( i830,
		 0, width, 0, height,
		 0, 255, 0, 0,
		 x, x+width, y, y+height );

      /* NOTE(review): presumably restores the real cliprects/drawing
       * origin clobbered above — confirm against its definition.
       */
      intelWindowMoved( intel );
   }
   UNLOCK_HARDWARE( intel );
   intelFinish( ctx ); /* required by GL */

   SET_STATE( i830, state );

   return GL_TRUE;
}
/* * Copy the back buffer to the front buffer. */ void intelCopyBuffer( const __DRIdrawablePrivate *dPriv ) { intelContextPtr intel; if (0) fprintf(stderr, "%s\n", __FUNCTION__); assert(dPriv); assert(dPriv->driContextPriv); assert(dPriv->driContextPriv->driverPrivate); intel = (intelContextPtr) dPriv->driContextPriv->driverPrivate; intelFlush( &intel->ctx ); LOCK_HARDWARE( intel ); { intelScreenPrivate *intelScreen = intel->intelScreen; __DRIdrawablePrivate *dPriv = intel->driDrawable; int nbox = dPriv->numClipRects; drm_clip_rect_t *pbox = dPriv->pClipRects; int pitch = intelScreen->frontPitch; int cpp = intelScreen->cpp; int i; GLuint CMD, BR13; BATCH_LOCALS; switch(cpp) { case 2: BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24); CMD = XY_SRC_COPY_BLT_CMD; break; case 4: BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24) | (1<<25); CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | XY_SRC_COPY_BLT_WRITE_RGB); break; default: BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24); CMD = XY_SRC_COPY_BLT_CMD; break; } if (0) intel_draw_performance_boxes( intel ); for (i = 0 ; i < nbox; i++, pbox++) { if (pbox->x1 > pbox->x2 || pbox->y1 > pbox->y2 || pbox->x2 > intelScreen->width || pbox->y2 > intelScreen->height) continue; BEGIN_BATCH( 8); OUT_BATCH( CMD ); OUT_BATCH( BR13 ); OUT_BATCH( (pbox->y1 << 16) | pbox->x1 ); OUT_BATCH( (pbox->y2 << 16) | pbox->x2 ); if (intel->sarea->pf_current_page == 0) OUT_BATCH( intelScreen->frontOffset ); else OUT_BATCH( intelScreen->backOffset ); OUT_BATCH( (pbox->y1 << 16) | pbox->x1 ); OUT_BATCH( BR13 & 0xffff ); if (intel->sarea->pf_current_page == 0) OUT_BATCH( intelScreen->backOffset ); else OUT_BATCH( intelScreen->frontOffset ); ADVANCE_BATCH(); } } intelFlushBatchLocked( intel, GL_TRUE, GL_TRUE, GL_TRUE ); UNLOCK_HARDWARE( intel ); }
/**
 * Clear the selected buffers (front/back color, depth, stencil) using
 * the 2D blit engine.
 *
 * \param flags  mask of BUFFER_BIT_* values selecting buffers to clear
 * \param all    GL_TRUE to clear each whole cliprect; GL_FALSE to clear
 *               only the intersection with (cx1, cy1, cw, ch)
 * \param cx1,cy1,cw,ch  clear rectangle in window coords (y=0=bottom)
 */
void intelClearWithBlit(GLcontext *ctx, GLbitfield flags, GLboolean all,
			GLint cx1, GLint cy1, GLint cw, GLint ch)
{
   intelContextPtr intel = INTEL_CONTEXT( ctx );
   intelScreenPrivate *intelScreen = intel->intelScreen;
   GLuint clear_depth, clear_color;
   GLint cx, cy;
   GLint pitch = intelScreen->frontPitch;   /* in pixels */
   GLint cpp = intelScreen->cpp;
   GLint i;
   GLuint BR13, CMD, D_CMD;
   BATCH_LOCALS;

   clear_color = intel->ClearColor;
   clear_depth = 0;

   if (flags & BUFFER_BIT_DEPTH) {
      clear_depth = (GLuint)(ctx->Depth.Clear * intel->ClearDepth);
   }

   if (flags & BUFFER_BIT_STENCIL) {
      /* stencil occupies the top byte of the packed depth/stencil value */
      clear_depth |= (ctx->Stencil.Clear & 0xff) << 24;
   }

   /* BR13: raster op 0xF0 (pattern copy / solid fill) | byte pitch |
    * color depth bits.  CMD clears color buffers; D_CMD clears
    * depth/stencil with channel-write bits limited to the planes
    * actually being cleared.
    */
   switch(cpp) {
   case 2:
      BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
      D_CMD = CMD = XY_COLOR_BLT_CMD;
      break;
   case 4:
      BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24) | (1<<25);
      CMD = (XY_COLOR_BLT_CMD |
	     XY_COLOR_BLT_WRITE_ALPHA |
	     XY_COLOR_BLT_WRITE_RGB);
      D_CMD = XY_COLOR_BLT_CMD;
      if (flags & BUFFER_BIT_DEPTH) D_CMD |= XY_COLOR_BLT_WRITE_RGB;
      if (flags & BUFFER_BIT_STENCIL) D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
      break;
   default:
      BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
      D_CMD = CMD = XY_COLOR_BLT_CMD;
      break;
   }

   intelFlush( &intel->ctx );
   LOCK_HARDWARE( intel );
   {
      /* flip top to bottom */
      cy = intel->driDrawable->h-cy1-ch;
      cx = cx1 + intel->drawX;
      cy += intel->drawY;

      /* adjust for page flipping */
      if ( intel->sarea->pf_current_page == 1 ) {
	 GLuint tmp = flags;
	 flags &= ~(BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT);
	 if ( tmp & BUFFER_BIT_FRONT_LEFT ) flags |= BUFFER_BIT_BACK_LEFT;
	 if ( tmp & BUFFER_BIT_BACK_LEFT ) flags |= BUFFER_BIT_FRONT_LEFT;
      }

      for (i = 0 ; i < intel->numClipRects ; i++) {
	 drm_clip_rect_t *box = &intel->pClipRects[i];
	 drm_clip_rect_t b;

	 if (!all) {
	    /* Intersect this cliprect with the clear rectangle.
	     *
	     * Bug fix: the original read box[i].x1 etc. here, but `box`
	     * already points at element i of pClipRects, so box[i]
	     * indexed past the current rect (out-of-bounds for i > 0).
	     */
	    GLint x = box->x1;
	    GLint y = box->y1;
	    GLint w = box->x2 - x;
	    GLint h = box->y2 - y;

	    if (x < cx) w -= cx - x, x = cx;
	    if (y < cy) h -= cy - y, y = cy;
	    if (x + w > cx + cw) w = cx + cw - x;
	    if (y + h > cy + ch) h = cy + ch - y;
	    if (w <= 0) continue;
	    if (h <= 0) continue;

	    b.x1 = x;
	    b.y1 = y;
	    b.x2 = x + w;
	    b.y2 = y + h;
	 } else {
	    b = *box;
	 }

	 /* skip malformed or off-screen rects */
	 if (b.x1 > b.x2 ||
	     b.y1 > b.y2 ||
	     b.x2 > intelScreen->width ||
	     b.y2 > intelScreen->height)
	    continue;

	 if ( flags & BUFFER_BIT_FRONT_LEFT ) {
	    BEGIN_BATCH( 6);
	    OUT_BATCH( CMD );
	    OUT_BATCH( BR13 );
	    OUT_BATCH( (b.y1 << 16) | b.x1 );
	    OUT_BATCH( (b.y2 << 16) | b.x2 );
	    OUT_BATCH( intelScreen->frontOffset );
	    OUT_BATCH( clear_color );
	    ADVANCE_BATCH();
	 }

	 if ( flags & BUFFER_BIT_BACK_LEFT ) {
	    BEGIN_BATCH( 6);
	    OUT_BATCH( CMD );
	    OUT_BATCH( BR13 );
	    OUT_BATCH( (b.y1 << 16) | b.x1 );
	    OUT_BATCH( (b.y2 << 16) | b.x2 );
	    OUT_BATCH( intelScreen->backOffset );
	    OUT_BATCH( clear_color );
	    ADVANCE_BATCH();
	 }

	 if ( flags & (BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH) ) {
	    BEGIN_BATCH( 6);
	    OUT_BATCH( D_CMD );
	    OUT_BATCH( BR13 );
	    OUT_BATCH( (b.y1 << 16) | b.x1 );
	    OUT_BATCH( (b.y2 << 16) | b.x2 );
	    OUT_BATCH( intelScreen->depthOffset );
	    OUT_BATCH( clear_depth );
	    ADVANCE_BATCH();
	 }
      }
   }
   intelFlushBatchLocked( intel, GL_TRUE, GL_FALSE, GL_TRUE );
   UNLOCK_HARDWARE( intel );
}
static GLboolean do_copy_texsubimage(struct intel_context *intel, struct intel_texture_image *intelImage, GLenum internalFormat, GLint dstx, GLint dsty, GLint x, GLint y, GLsizei width, GLsizei height) { GLcontext *ctx = &intel->ctx; const struct intel_region *src = get_teximage_source(intel, internalFormat); if (!intelImage->mt || !src) { DBG("%s fail %p %p\n", __FUNCTION__, intelImage->mt, src); return GL_FALSE; } intelFlush(ctx); LOCK_HARDWARE(intel); { GLuint image_offset = intel_miptree_image_offset(intelImage->mt, intelImage->face, intelImage->level); const GLint orig_x = x; const GLint orig_y = y; const struct gl_framebuffer *fb = ctx->DrawBuffer; if (_mesa_clip_to_region(fb->_Xmin, fb->_Ymin, fb->_Xmax, fb->_Ymax, &x, &y, &width, &height)) { /* Update dst for clipped src. Need to also clip the source rect. */ dstx += x - orig_x; dsty += y - orig_y; if (ctx->ReadBuffer->Name == 0) { /* reading from a window, adjust x, y */ __DRIdrawablePrivate *dPriv = intel->driDrawable; GLuint window_y; /* window_y = position of window on screen if y=0=bottom */ window_y = intel->intelScreen->height - (dPriv->y + dPriv->h); y = window_y + y; x += dPriv->x; } else { /* reading from a FBO */ /* invert Y */ y = ctx->ReadBuffer->Height - y - 1; } /* A bit of fiddling to get the blitter to work with -ve * pitches. But we get a nice inverted blit this way, so it's * worth it: */ intelEmitCopyBlit(intel, intelImage->mt->cpp, -src->pitch, src->buffer, src->height * src->pitch * src->cpp, intelImage->mt->pitch, intelImage->mt->region->buffer, image_offset, x, y + height, dstx, dsty, width, height, GL_COPY); /* ? */ intel_batchbuffer_flush(intel->batch); } } UNLOCK_HARDWARE(intel); #if 0 /* GL_SGIS_generate_mipmap -- this can be accelerated now. * XXX Add a ctx->Driver.GenerateMipmaps() function? */ if (level == texObj->BaseLevel && texObj->GenerateMipmap) { intel_generate_mipmap(ctx, target, &ctx->Texture.Unit[ctx->Texture.CurrentUnit], texObj); } #endif return GL_TRUE; }
void IntelMausi::intelEnableIRQ(UInt32 newMask) { intelWriteMem32(E1000_IMS, newMask); intelFlush(); }
/* Try to service glReadPixels with the blitter: copy from the read
 * region straight into the caller's AGP-resident destination buffer.
 *
 * Returns GL_TRUE if handled (including fully clipped reads), GL_FALSE
 * to fall back to software.
 */
static GLboolean
intelTryReadPixels( GLcontext *ctx,
		    GLint x, GLint y, GLsizei width, GLsizei height,
		    GLenum format, GLenum type,
		    const struct gl_pixelstore_attrib *pack,
		    GLvoid *pixels )
{
   intelContextPtr intel = INTEL_CONTEXT(ctx);
   GLint size = 0; /* not really used */
   GLint pitch = pack->RowLength ? pack->RowLength : width; /* in pixels */

   if (INTEL_DEBUG & DEBUG_PIXEL)
      fprintf(stderr, "%s\n", __FUNCTION__);

   /* Only accelerate reading to agp buffers.
    */
   if ( !intelIsAgpMemory(intel, pixels,
			  pitch * height * intel->intelScreen->cpp ) ) {
      if (INTEL_DEBUG & DEBUG_PIXEL)
	 fprintf(stderr, "%s: dest not agp\n", __FUNCTION__);
      return GL_FALSE;
   }

   /* Need GL_PACK_INVERT_MESA to cope with upsidedown results from
    * blitter:
    */
   if (!pack->Invert) {
      if (INTEL_DEBUG & DEBUG_PIXEL)
	 fprintf(stderr, "%s: MESA_PACK_INVERT not set\n", __FUNCTION__);
      return GL_FALSE;
   }

   if (!check_color(ctx, type, format, pack, pixels, size, pitch))
      return GL_FALSE;

   /* only 32bpp screens are handled */
   switch ( intel->intelScreen->cpp ) {
   case 4:
      break;
   default:
      return GL_FALSE;
   }

   /* Although the blits go on the command buffer, need to do this and
    * fire with lock held to guarentee cliprects and drawing offset are
    * correct.
    *
    * This is an unusual situation however, as the code which flushes
    * a full command buffer expects to be called unlocked.  As a
    * workaround, immediately flush the buffer on aquiring the lock.
    */
   intelFlush( &intel->ctx );
   LOCK_HARDWARE( intel );
   {
      __DRIdrawablePrivate *dPriv = intel->driDrawable;
      int nbox = dPriv->numClipRects;
      int src_offset = intel->readRegion->offset;
      int src_pitch = intel->intelScreen->front.pitch;
      int dst_offset = intelAgpOffsetFromVirtual( intel, pixels);
      drm_clip_rect_t *box = dPriv->pClipRects;
      int i;

      assert(dst_offset != ~0);	/* should have been caught above */

      if (!clip_pixelrect(ctx, ctx->ReadBuffer, &x, &y, &width, &height)) {
	 UNLOCK_HARDWARE( intel );
	 if (INTEL_DEBUG & DEBUG_PIXEL)
	    fprintf(stderr, "%s totally clipped -- nothing to do\n",
		    __FUNCTION__);
	 return GL_TRUE;
      }

      /* convert to screen coords (y=0=top) */
      y = dPriv->h - y - height;
      x += dPriv->x;
      y += dPriv->y;

      if (INTEL_DEBUG & DEBUG_PIXEL)
	 fprintf(stderr, "readpixel blit src_pitch %d dst_pitch %d\n",
		 src_pitch, pitch);

      /* We don't really have to do window clipping for readpixels.
       * The OpenGL spec says that pixels read from outside the
       * visible window region (pixel ownership) have undefined value.
       */
      for (i = 0 ; i < nbox ; i++) {
	 GLint bx, by, bw, bh;
	 if (intersect_region(box+i, x, y, width, height,
			      &bx, &by, &bw, &bh)) {
	    intelEmitCopyBlitLocked( intel,
				     intel->intelScreen->cpp,
				     src_pitch, src_offset,
				     pitch, dst_offset,
				     bx, by,
				     bx - x, by - y,
				     bw, bh );
	 }
      }
   }
   UNLOCK_HARDWARE( intel );
   intelFinish( &intel->ctx );

   return GL_TRUE;
}
/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 *
 * Chooses a hardware-supported mesa format for the requested internal
 * format, drops any previous storage, and allocates a new untiled region
 * with a 64-byte-aligned row pitch.
 */
static GLboolean
intel_alloc_renderbuffer_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   int bytes_per_pixel;
   GLuint aligned_pitch;

   ASSERT(rb->Name != 0);

   /* Map the requested internal format onto one of the formats this
    * driver actually allocates.
    */
   switch (internalFormat) {
   case GL_R3_G3_B2:
   case GL_RGB4:
   case GL_RGB5:
      rb->Format = MESA_FORMAT_RGB565;
      rb->DataType = GL_UNSIGNED_BYTE;
      break;

   case GL_RGB:
   case GL_RGB8:
   case GL_RGB10:
   case GL_RGB12:
   case GL_RGB16:
      rb->Format = MESA_FORMAT_XRGB8888;
      rb->DataType = GL_UNSIGNED_BYTE;
      break;

   case GL_RGBA:
   case GL_RGBA2:
   case GL_RGBA4:
   case GL_RGB5_A1:
   case GL_RGBA8:
   case GL_RGB10_A2:
   case GL_RGBA12:
   case GL_RGBA16:
      rb->Format = MESA_FORMAT_ARGB8888;
      rb->DataType = GL_UNSIGNED_BYTE;
      break;

   case GL_DEPTH_COMPONENT16:
      rb->Format = MESA_FORMAT_Z16;
      rb->DataType = GL_UNSIGNED_SHORT;
      break;

   /* Stencil-only, (non-16-bit) depth-only and packed depth/stencil
    * requests all get one combined depth+stencil allocation.
    */
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
   case GL_DEPTH_COMPONENT:
   case GL_DEPTH_COMPONENT24:
   case GL_DEPTH_COMPONENT32:
   case GL_DEPTH_STENCIL_EXT:
   case GL_DEPTH24_STENCIL8_EXT:
      rb->Format = MESA_FORMAT_S8_Z24;
      rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
      break;

   default:
      _mesa_problem(ctx,
                    "Unexpected format in intel_alloc_renderbuffer_storage");
      return GL_FALSE;
   }

   rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
   bytes_per_pixel = _mesa_get_format_bytes(rb->Format);

   intelFlush(ctx);

   /* Release storage from any previous allocation. */
   if (irb->region) {
      intel_region_release(&irb->region);
   }

   /* Round the byte pitch up to a multiple of 64, then convert back to
    * pixels as intel_region_alloc expects.
    */
   aligned_pitch = ((bytes_per_pixel * width + 63) & ~63) / bytes_per_pixel;

   DBG("Allocating %d x %d Intel RBO (pitch %d)\n", width, height,
       aligned_pitch);

   irb->region = intel_region_alloc(intel, I915_TILING_NONE, bytes_per_pixel,
                                    width, height, aligned_pitch, GL_TRUE);
   if (!irb->region)
      return GL_FALSE;          /* out of memory? */

   ASSERT(irb->region->buffer);

   rb->Width = width;
   rb->Height = height;

   return GL_TRUE;
}