/**
 * Do software-based glCopyPixels.
 * By the time we get here, all parameters will have been error-checked.
 */
void
_swrast_CopyPixels( GLcontext *ctx,
                    GLint srcx, GLint srcy, GLsizei width, GLsizei height,
                    GLint destx, GLint desty, GLenum type )
{
   SWcontext *swrast = SWRAST_CONTEXT(ctx);

   /* Check this before swrast_render_start() so the early return doesn't
    * leave rendering started but never finished.
    */
   if (!_mesa_check_conditional_render(ctx))
      return; /* don't copy */

   swrast_render_start(ctx);

   if (swrast->NewState)
      _swrast_validate_derived( ctx );

   if (!fast_copy_pixels(ctx, srcx, srcy, width, height, destx, desty, type)) {
      switch (type) {
      case GL_COLOR:
         copy_rgba_pixels( ctx, srcx, srcy, width, height, destx, desty );
         break;
      case GL_DEPTH:
         copy_depth_pixels( ctx, srcx, srcy, width, height, destx, desty );
         break;
      case GL_STENCIL:
         copy_stencil_pixels( ctx, srcx, srcy, width, height, destx, desty );
         break;
      case GL_DEPTH_STENCIL_EXT:
         copy_depth_stencil_pixels(ctx, srcx, srcy, width, height,
                                   destx, desty);
         break;
      default:
         _mesa_problem(ctx, "unexpected type in _swrast_CopyPixels");
      }
   }

   swrast_render_finish(ctx);
}
void
intelDrawPixels(struct gl_context * ctx,
                GLint x, GLint y,
                GLsizei width, GLsizei height,
                GLenum format,
                GLenum type,
                const struct gl_pixelstore_attrib *unpack,
                const GLvoid * pixels)
{
   struct brw_context *brw = brw_context(ctx);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (format == GL_STENCIL_INDEX) {
      _swrast_DrawPixels(ctx, x, y, width, height, format, type,
                         unpack, pixels);
      return;
   }

   if (_mesa_is_bufferobj(unpack->BufferObj)) {
      if (do_blit_drawpixels(ctx, x, y, width, height, format, type, unpack,
                             pixels)) {
         return;
      }

      perf_debug("%s: fallback to generic code in PBO case\n", __FUNCTION__);
   }

   _mesa_meta_DrawPixels(ctx, x, y, width, height, format, type,
                         unpack, pixels);
}
void
brw_draw_prims( struct gl_context *ctx,
                const struct _mesa_prim *prims,
                GLuint nr_prims,
                const struct _mesa_index_buffer *ib,
                GLboolean index_bounds_valid,
                GLuint min_index,
                GLuint max_index,
                struct gl_transform_feedback_object *unused_tfb_object,
                struct gl_buffer_object *indirect )
{
   struct brw_context *brw = brw_context(ctx);
   const struct gl_client_array **arrays = ctx->Array._DrawArrays;

   assert(unused_tfb_object == NULL);

   if (ctx->Query.CondRenderQuery) {
      perf_debug("Conditional rendering is implemented in software and may "
                 "stall. This should be fixed in the driver.\n");
   }

   if (!_mesa_check_conditional_render(ctx))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib, indirect)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
    * won't support all the extensions we support.
    */
   if (ctx->RenderMode != GL_RENDER) {
      perf_debug("%s render mode not supported in hardware\n",
                 _mesa_lookup_enum_by_nr(ctx->RenderMode));
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, prims, nr_prims, ib,
                      index_bounds_valid, min_index, max_index, NULL, NULL);
      return;
   }

   /* If we're going to have to upload any of the user's vertex arrays, then
    * get the minimum and maximum of their index buffer so we know what range
    * to upload.
    */
   if (!index_bounds_valid && !vbo_all_varyings_in_vbos(arrays)) {
      perf_debug("Scanning index buffer to compute index buffer bounds. "
                 "Use glDrawRangeElements() to avoid this.\n");
      vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
   }

   /* Try drawing with the hardware, but don't do anything else if we can't
    * manage it.  swrast doesn't support our featureset, so we can't fall back
    * to it.
    */
   brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index,
                      indirect);
}
/**
 * Called via the device driver's ctx->Driver.Clear() function if the
 * device driver can't clear one or more of the buffers itself.
 * \param buffers  bitfield of BUFFER_BIT_* values indicating which
 *                 renderbuffers are to be cleared.
 */
void
_swrast_Clear(struct gl_context *ctx, GLbitfield buffers)
{
   const GLbitfield BUFFER_DS = BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL;

#ifdef DEBUG_FOO
   {
      const GLbitfield legalBits =
         BUFFER_BIT_FRONT_LEFT |
         BUFFER_BIT_FRONT_RIGHT |
         BUFFER_BIT_BACK_LEFT |
         BUFFER_BIT_BACK_RIGHT |
         BUFFER_BIT_DEPTH |
         BUFFER_BIT_STENCIL |
         BUFFER_BIT_ACCUM |
         BUFFER_BIT_AUX0;
      assert((buffers & (~legalBits)) == 0);
   }
#endif

   if (!_mesa_check_conditional_render(ctx))
      return; /* don't clear */

   if (SWRAST_CONTEXT(ctx)->NewState)
      _swrast_validate_derived(ctx);

   if ((buffers & BUFFER_BITS_COLOR)
       && (ctx->DrawBuffer->_NumColorDrawBuffers > 0)) {
      clear_color_buffers(ctx);
   }

   if (buffers & BUFFER_BIT_ACCUM) {
      _mesa_clear_accum_buffer(ctx);
   }

   if (buffers & BUFFER_DS) {
      struct gl_renderbuffer *depthRb =
         ctx->DrawBuffer->Attachment[BUFFER_DEPTH].Renderbuffer;
      struct gl_renderbuffer *stencilRb =
         ctx->DrawBuffer->Attachment[BUFFER_STENCIL].Renderbuffer;

      if ((buffers & BUFFER_DS) == BUFFER_DS && depthRb == stencilRb) {
         /* clear depth and stencil together */
         _swrast_clear_depth_stencil_buffer(ctx);
      }
      else {
         /* clear depth and stencil separately */
         if (buffers & BUFFER_BIT_DEPTH) {
            _swrast_clear_depth_buffer(ctx);
         }
         if (buffers & BUFFER_BIT_STENCIL) {
            _swrast_clear_stencil_buffer(ctx);
         }
      }
   }
}
/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   bool partial_clear = ctx->Scissor.EnableFlags && !noop_scissor(ctx, fb);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      brw->front_buffer_dirty = true;
   }

   intel_prepare_render(brw);
   brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
         DBG("fast clear: depth\n");
         mask &= ~BUFFER_BIT_DEPTH;
      }
   }

   /* BLORP is currently only supported on Gen6+. */
   if (brw->gen >= 6 && brw->gen < 8) {
      if (mask & BUFFER_BITS_COLOR) {
         if (brw_blorp_clear_color(brw, fb, mask, partial_clear)) {
            debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
            mask &= ~BUFFER_BITS_COLOR;
         }
      }
   }

   GLbitfield tri_mask = mask & (BUFFER_BITS_COLOR |
                                 BUFFER_BIT_STENCIL |
                                 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;

      if (ctx->API == API_OPENGLES) {
         _mesa_meta_Clear(&brw->ctx, tri_mask);
      } else {
         _mesa_meta_glsl_Clear(&brw->ctx, tri_mask);
      }
   }

   /* Any strange buffers get passed off to swrast */
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}
/**
 * Do software-based glCopyPixels.
 * By the time we get here, all parameters will have been error-checked.
 */
void
_swrast_CopyPixels( struct gl_context *ctx,
                    GLint srcx, GLint srcy, GLsizei width, GLsizei height,
                    GLint destx, GLint desty, GLenum type )
{
   SWcontext *swrast = SWRAST_CONTEXT(ctx);
   struct gl_renderbuffer *rb;

   if (!_mesa_check_conditional_render(ctx))
      return; /* don't copy */

   if (swrast->NewState)
      _swrast_validate_derived( ctx );

   if (!(SWRAST_CONTEXT(ctx)->_RasterMask != 0x0 ||
         ctx->Pixel.ZoomX != 1.0F ||
         ctx->Pixel.ZoomY != 1.0F ||
         ctx->_ImageTransferState) &&
       swrast_fast_copy_pixels(ctx, srcx, srcy, width, height, destx, desty,
                               type)) {
      /* all done */
      return;
   }

   swrast_render_start(ctx);
   rb = map_readbuffer(ctx, type);

   switch (type) {
   case GL_COLOR:
      copy_rgba_pixels( ctx, srcx, srcy, width, height, destx, desty );
      break;
   case GL_DEPTH:
      copy_depth_pixels( ctx, srcx, srcy, width, height, destx, desty );
      break;
   case GL_STENCIL:
      copy_stencil_pixels( ctx, srcx, srcy, width, height, destx, desty );
      break;
   case GL_DEPTH_STENCIL_EXT:
      /* Copy buffers separately (if the fast copy path wasn't taken) */
      copy_depth_pixels(ctx, srcx, srcy, width, height, destx, desty);
      copy_stencil_pixels(ctx, srcx, srcy, width, height, destx, desty);
      break;
   default:
      _mesa_problem(ctx, "unexpected type in _swrast_CopyPixels");
   }

   swrast_render_finish(ctx);

   if (rb) {
      struct swrast_renderbuffer *srb = swrast_renderbuffer(rb);
      ctx->Driver.UnmapRenderbuffer(ctx, rb);
      srb->Map = NULL;
   }
}
bool
brw_check_conditional_render(struct brw_context *brw)
{
   if (brw->predicate.state == BRW_PREDICATE_STATE_STALL_FOR_QUERY) {
      perf_debug("Conditional rendering is implemented in software and may "
                 "stall.\n");
      return _mesa_check_conditional_render(&brw->ctx);
   }

   return brw->predicate.state != BRW_PREDICATE_STATE_DONT_RENDER;
}
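For context, this is how an application reaches these paths; a sketch using the standard OpenGL 3.0 conditional-rendering API, where the draw_* callbacks are hypothetical application functions, not anything from this tree:

/* Illustrative only: standard GL 3.0 conditional rendering from the
 * application side.  When the occlusion query returns zero samples, the
 * predicated draws are the ones the driver discards, either via
 * _mesa_check_conditional_render() or via the hardware predicate.
 */
static void
draw_with_conditional_render(void)
{
   GLuint q;
   glGenQueries(1, &q);

   glBeginQuery(GL_SAMPLES_PASSED, q);
   draw_occluder_proxy();           /* hypothetical: cheap bounding volume */
   glEndQuery(GL_SAMPLES_PASSED);

   glBeginConditionalRender(q, GL_QUERY_WAIT);
   draw_expensive_object();         /* hypothetical: the real geometry */
   glEndConditionalRender();

   glDeleteQueries(1, &q);
}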
void
brw_draw_prims( struct gl_context *ctx,
                const struct _mesa_prim *prim,
                GLuint nr_prims,
                const struct _mesa_index_buffer *ib,
                GLboolean index_bounds_valid,
                GLuint min_index,
                GLuint max_index,
                struct gl_transform_feedback_object *tfb_vertcount )
{
   const struct gl_client_array **arrays = ctx->Array._DrawArrays;
   bool retval;

   if (!_mesa_check_conditional_render(ctx))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prim, nr_prims, ib)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   if (!vbo_all_varyings_in_vbos(arrays)) {
      if (!index_bounds_valid)
         vbo_get_minmax_indices(ctx, prim, ib, &min_index, &max_index,
                                nr_prims);

      /* Decide if we want to rebase.  If so we end up recursing once
       * only into this function.
       */
      if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
         vbo_rebase_prims(ctx, arrays,
                          prim, nr_prims,
                          ib, min_index, max_index,
                          brw_draw_prims );
         return;
      }
   }

   /* Make a first attempt at drawing:
    */
   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib,
                               min_index, max_index);

   /* Otherwise, we really are out of memory.  Pass the drawing command to
    * the software tnl module, which will in turn call swrast to do the
    * drawing.
    */
   if (!retval) {
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
   }
}
/* There are a large number of possible ways to implement bitmap on
 * this hardware, most of them have some sort of drawback.  Here are a
 * few that spring to mind:
 *
 * Blit:
 *    - XY_MONO_SRC_BLT_CMD
 *         - use XY_SETUP_CLIP_BLT for cliprect clipping.
 *    - XY_TEXT_BLT
 *    - XY_TEXT_IMMEDIATE_BLT
 *         - blit per cliprect, subject to maximum immediate data size.
 *    - XY_COLOR_BLT
 *         - per pixel or run of pixels
 *    - XY_PIXEL_BLT
 *         - good for sparse bitmaps
 *
 * 3D engine:
 *    - Point per pixel
 *    - Translate bitmap to an alpha texture and render as a quad
 *    - Chop bitmap up into 32x32 squares and render w/polygon stipple.
 */
void
intelBitmap(struct gl_context * ctx,
            GLint x, GLint y,
            GLsizei width, GLsizei height,
            const struct gl_pixelstore_attrib *unpack,
            const GLubyte * pixels)
{
   if (!_mesa_check_conditional_render(ctx))
      return;

   if (do_blit_bitmap(ctx, x, y, width, height, unpack, pixels))
      return;

   _mesa_meta_Bitmap(ctx, x, y, width, height, unpack, pixels);
}
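The "translate bitmap to an alpha texture" strategy listed above needs each packed GL_BITMAP row expanded into texels first; a minimal sketch of that step follows. The helper name is hypothetical, and it assumes the default unpack state (MSB-first, no UNPACK_SKIP_PIXELS, byte-aligned rows):

/* Hypothetical helper, not driver code: expand one row of an MSB-first
 * GL_BITMAP into 0/255 alpha bytes, as the alpha-texture strategy implies.
 * Assumes UNPACK_LSB_FIRST = GL_FALSE, no SkipPixels, byte-aligned rows.
 */
static void
expand_bitmap_row(const GLubyte *src, GLubyte *dst, GLsizei width)
{
   GLubyte mask = 0x80;          /* MSB first: bit 7 is the leftmost pixel */
   GLsizei col;

   for (col = 0; col < width; col++) {
      dst[col] = (*src & mask) ? 0xff : 0x00;
      if (mask == 0x01) {        /* used up this byte, move to the next */
         src++;
         mask = 0x80;
      }
      else {
         mask >>= 1;
      }
   }
}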
void
brw_draw_prims( struct gl_context *ctx,
                const struct _mesa_prim *prim,
                GLuint nr_prims,
                const struct _mesa_index_buffer *ib,
                GLboolean index_bounds_valid,
                GLuint min_index,
                GLuint max_index,
                struct gl_transform_feedback_object *tfb_vertcount )
{
   struct intel_context *intel = intel_context(ctx);
   const struct gl_client_array **arrays = ctx->Array._DrawArrays;

   if (!_mesa_check_conditional_render(ctx))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prim, nr_prims, ib)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   /* If we're going to have to upload any of the user's vertex arrays, then
    * get the minimum and maximum of their index buffer so we know what range
    * to upload.
    */
   if (!vbo_all_varyings_in_vbos(arrays) && !index_bounds_valid)
      vbo_get_minmax_indices(ctx, prim, ib, &min_index, &max_index, nr_prims);

   /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
    * won't support all the extensions we support.
    */
   if (ctx->RenderMode != GL_RENDER) {
      perf_debug("%s render mode not supported in hardware\n",
                 _mesa_lookup_enum_by_nr(ctx->RenderMode));
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
      return;
   }

   /* Try drawing with the hardware, but don't do anything else if we can't
    * manage it.  swrast doesn't support our featureset, so we can't fall back
    * to it.
    */
   brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
}
void
intelCopyPixels(struct gl_context * ctx,
                GLint srcx, GLint srcy,
                GLsizei width, GLsizei height,
                GLint destx, GLint desty, GLenum type)
{
   DBG("%s\n", __FUNCTION__);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (do_blit_copypixels(ctx, srcx, srcy, width, height, destx, desty, type))
      return;

   /* this will use swrast if needed */
   _mesa_meta_CopyPixels(ctx, srcx, srcy, width, height, destx, desty, type);
}
/**
 * Software fallback for glAccum.  A hardware driver that supports
 * signed 16-bit color channels could implement hardware accumulation
 * operations, but no driver does so at this time.
 */
void
_mesa_accum(struct gl_context *ctx, GLenum op, GLfloat value)
{
   GLint xpos, ypos, width, height;

   if (!ctx->DrawBuffer->Attachment[BUFFER_ACCUM].Renderbuffer) {
      _mesa_warning(ctx, "Calling glAccum() without an accumulation buffer");
      return;
   }

   if (!_mesa_check_conditional_render(ctx))
      return;

   xpos = ctx->DrawBuffer->_Xmin;
   ypos = ctx->DrawBuffer->_Ymin;
   width = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
   height = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;

   switch (op) {
   case GL_ADD:
      if (value != 0.0F) {
         accum_scale_or_bias(ctx, value, xpos, ypos, width, height, GL_TRUE);
      }
      break;
   case GL_MULT:
      if (value != 1.0F) {
         accum_scale_or_bias(ctx, value, xpos, ypos, width, height, GL_FALSE);
      }
      break;
   case GL_ACCUM:
      if (value != 0.0F) {
         accum_or_load(ctx, value, xpos, ypos, width, height, GL_FALSE);
      }
      break;
   case GL_LOAD:
      accum_or_load(ctx, value, xpos, ypos, width, height, GL_TRUE);
      break;
   case GL_RETURN:
      accum_return(ctx, value, xpos, ypos, width, height);
      break;
   default:
      _mesa_problem(ctx, "invalid mode in _mesa_accum()");
      break;
   }
}
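For reference, a sketch of the per-channel arithmetic behind those helpers, taken from the GL specification rather than from this file (the helper-to-op mapping in the comments is inferred from the call sites above):

/* Reference semantics per the GL spec, not driver code.  "accum" is the
 * accumulation-buffer value and "frame" the color-buffer value, as floats.
 */
static GLfloat
accum_op_result(GLenum op, GLfloat value, GLfloat accum, GLfloat frame)
{
   switch (op) {
   case GL_ADD:
      return accum + value;           /* accum_scale_or_bias, bias path */
   case GL_MULT:
      return accum * value;           /* accum_scale_or_bias, scale path */
   case GL_ACCUM:
      return accum + frame * value;   /* accum_or_load, accumulate */
   case GL_LOAD:
      return frame * value;           /* accum_or_load, load */
   default:
      return accum;
   }
}
/* GL_RETURN goes the other direction: frame = CLAMP(accum * value, 0, 1). */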
void
brw_draw_prims( struct gl_context *ctx,
                const struct gl_client_array *arrays[],
                const struct _mesa_prim *prim,
                GLuint nr_prims,
                const struct _mesa_index_buffer *ib,
                GLboolean index_bounds_valid,
                GLuint min_index,
                GLuint max_index )
{
   GLboolean retval;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (!vbo_all_varyings_in_vbos(arrays)) {
      if (!index_bounds_valid)
         vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);

      /* Decide if we want to rebase.  If so we end up recursing once
       * only into this function.
       */
      if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
         vbo_rebase_prims(ctx, arrays,
                          prim, nr_prims,
                          ib, min_index, max_index,
                          brw_draw_prims );
         return;
      }
   }

   /* Make a first attempt at drawing:
    */
   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib,
                               min_index, max_index);

   /* Otherwise, we really are out of memory.  Pass the drawing command to
    * the software tnl module, which will in turn call swrast to do the
    * drawing.
    */
   if (!retval) {
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
   }
}
/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct intel_context *intel = intel_context(ctx);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      intel->front_buffer_dirty = true;
   }

   intel_prepare_render(intel);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
         DBG("fast clear: depth\n");
         mask &= ~BUFFER_BIT_DEPTH;
      }
   }

   GLbitfield tri_mask = mask & (BUFFER_BITS_COLOR |
                                 BUFFER_BIT_STENCIL |
                                 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;

      if (ctx->API == API_OPENGLES) {
         _mesa_meta_Clear(&intel->ctx, tri_mask);
      } else {
         _mesa_meta_glsl_Clear(&intel->ctx, tri_mask);
      }
   }

   /* Any strange buffers get passed off to swrast */
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}
/* This is the main entrypoint into the slimmed-down software tnl
 * module.  In a regular swtnl driver, this can be plugged straight
 * into the vbo->Driver.DrawPrims() callback.
 */
void
_tnl_draw_prims( struct gl_context *ctx,
                 const struct gl_client_array *arrays[],
                 const struct _mesa_prim *prim,
                 GLuint nr_prims,
                 const struct _mesa_index_buffer *ib,
                 GLuint min_index,
                 GLuint max_index)
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   const GLuint TEST_SPLIT = 0;
   const GLint max = TEST_SPLIT ? 8 : tnl->vb.Size - MAX_CLIPPED_VERTICES;
   GLint max_basevertex = prim->basevertex;
   GLuint i;

   /* Mesa core state should have been validated already */
   assert(ctx->NewState == 0x0);

   if (!_mesa_check_conditional_render(ctx))
      return; /* don't draw */

   for (i = 1; i < nr_prims; i++)
      max_basevertex = MAX2(max_basevertex, prim[i].basevertex);

   if (0) {
      printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);
      for (i = 0; i < nr_prims; i++)
         printf("prim %d: %s start %d count %d\n", i,
                _mesa_lookup_enum_by_nr(prim[i].mode),
                prim[i].start,
                prim[i].count);
   }

   if (min_index) {
      /* We always translate away calls with min_index != 0.
       */
      vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib,
                        min_index, max_index,
                        _tnl_vbo_draw_prims );
      return;
   }
   else if ((GLint)max_index + max_basevertex > max) {
      /* The software TNL pipeline has a fixed amount of storage for
       * vertices and it is necessary to split incoming drawing commands
       * if they exceed that limit.
       */
      struct split_limits limits;
      limits.max_verts = max;
      limits.max_vb_size = ~0;
      limits.max_indices = ~0;

      /* This will split the buffers one way or another and
       * recursively call back into this function.
       */
      vbo_split_prims( ctx, arrays, prim, nr_prims, ib,
                       0, max_index + prim->basevertex,
                       _tnl_vbo_draw_prims, &limits );
   }
   else {
      /* May need to map a vertex buffer object for every attribute plus
       * one for the index buffer.
       */
      struct gl_buffer_object *bo[VERT_ATTRIB_MAX + 1];
      GLuint nr_bo = 0;
      GLuint inst;

      for (i = 0; i < nr_prims;) {
         GLuint this_nr_prims;

         /* Our SW TNL pipeline doesn't handle basevertex yet, so
          * bind_indices will rebase the elements to the basevertex, and
          * we'll only emit strings of prims with the same basevertex in
          * one draw call.
          */
         for (this_nr_prims = 1; i + this_nr_prims < nr_prims;
              this_nr_prims++) {
            if (prim[i].basevertex != prim[i + this_nr_prims].basevertex)
               break;
         }

         assert(prim[i].num_instances > 0);

         /* Binding inputs may imply mapping some vertex buffer objects.
          * They will need to be unmapped below.
          */
         for (inst = 0; inst < prim[i].num_instances; inst++) {
            bind_prims(ctx, &prim[i], this_nr_prims);
            bind_inputs(ctx, arrays, max_index + prim[i].basevertex + 1,
                        bo, &nr_bo);
            bind_indices(ctx, ib, bo, &nr_bo);

            tnl->CurInstance = inst;
            TNL_CONTEXT(ctx)->Driver.RunPipeline(ctx);

            unmap_vbos(ctx, bo, nr_bo);
            free_space(ctx);
         }

         i += this_nr_prims;
      }
   }
}
/**
 * Software fallback for glBlitFramebufferEXT().
 */
void
_swrast_BlitFramebuffer(GLcontext *ctx,
                        GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                        GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                        GLbitfield mask, GLenum filter)
{
   static const GLbitfield buffers[3] = {
      GL_COLOR_BUFFER_BIT,
      GL_DEPTH_BUFFER_BIT,
      GL_STENCIL_BUFFER_BIT
   };
   GLint i;

   if (!_mesa_check_conditional_render(ctx))
      return; /* don't blit */

   if (!ctx->DrawBuffer->_NumColorDrawBuffers)
      return;

   if (!_mesa_clip_blit(ctx, &srcX0, &srcY0, &srcX1, &srcY1,
                        &dstX0, &dstY0, &dstX1, &dstY1)) {
      return;
   }

   swrast_render_start(ctx);

   if (srcX1 - srcX0 == dstX1 - dstX0 &&
       srcY1 - srcY0 == dstY1 - dstY0 &&
       srcX0 < srcX1 &&
       srcY0 < srcY1 &&
       dstX0 < dstX1 &&
       dstY0 < dstY1) {
      /* no stretching or flipping.
       * filter doesn't matter.
       */
      for (i = 0; i < 3; i++) {
         if (mask & buffers[i]) {
            simple_blit(ctx, srcX0, srcY0, srcX1, srcY1,
                        dstX0, dstY0, dstX1, dstY1, buffers[i]);
         }
      }
   }
   else {
      if (filter == GL_NEAREST) {
         for (i = 0; i < 3; i++) {
            if (mask & buffers[i]) {
               blit_nearest(ctx, srcX0, srcY0, srcX1, srcY1,
                            dstX0, dstY0, dstX1, dstY1, buffers[i]);
            }
         }
      }
      else {
         ASSERT(filter == GL_LINEAR);
         if (mask & GL_COLOR_BUFFER_BIT) {  /* depth/stencil not allowed */
            blit_linear(ctx, srcX0, srcY0, srcX1, srcY1,
                        dstX0, dstY0, dstX1, dstY1);
         }
      }
   }

   swrast_render_finish(ctx);
}
/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   bool partial_clear = ctx->Scissor.EnableFlags && !noop_scissor(fb);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      brw->front_buffer_dirty = true;
   }

   intel_prepare_render(brw);
   brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
         DBG("fast clear: depth\n");
         mask &= ~BUFFER_BIT_DEPTH;
      }
   }

   if (mask & BUFFER_BIT_STENCIL) {
      struct intel_renderbuffer *stencil_irb =
         intel_get_renderbuffer(fb, BUFFER_STENCIL);
      struct intel_mipmap_tree *mt = stencil_irb->mt;
      if (mt && mt->stencil_mt)
         mt->stencil_mt->r8stencil_needs_update = true;
   }

   if (mask & BUFFER_BITS_COLOR) {
      brw_blorp_clear_color(brw, fb, mask, partial_clear,
                            ctx->Color.sRGBEnabled);
      debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
      mask &= ~BUFFER_BITS_COLOR;
   }

   if (devinfo->gen >= 6 && (mask & BUFFER_BITS_DEPTH_STENCIL)) {
      brw_blorp_clear_depth_stencil(brw, fb, mask, partial_clear);
      debug_mask("blorp depth/stencil", mask & BUFFER_BITS_DEPTH_STENCIL);
      mask &= ~BUFFER_BITS_DEPTH_STENCIL;
   }

   GLbitfield tri_mask = mask & (BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;
      _mesa_meta_glsl_Clear(&brw->ctx, tri_mask);
   }

   /* Any strange buffers get passed off to swrast.  The only thing that
    * should be left at this point is the accumulation buffer.
    */
   assert((mask & ~BUFFER_BIT_ACCUM) == 0);
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}
/**
 * Render a bitmap.
 * Called via ctx->Driver.Bitmap()
 * All parameter error checking will have been done before this is called.
 */
void
_swrast_Bitmap( struct gl_context *ctx, GLint px, GLint py,
                GLsizei width, GLsizei height,
                const struct gl_pixelstore_attrib *unpack,
                const GLubyte *bitmap )
{
   GLint row, col;
   GLuint count = 0;
   SWspan span;

   ASSERT(ctx->RenderMode == GL_RENDER);

   if (!_mesa_check_conditional_render(ctx))
      return; /* don't draw */

   bitmap = (const GLubyte *) _mesa_map_pbo_source(ctx, unpack, bitmap);
   if (!bitmap)
      return;

   swrast_render_start(ctx);

   if (SWRAST_CONTEXT(ctx)->NewState)
      _swrast_validate_derived( ctx );

   INIT_SPAN(span, GL_BITMAP);
   span.end = width;
   span.arrayMask = SPAN_XY;
   _swrast_span_default_attribs(ctx, &span);

   for (row = 0; row < height; row++) {
      const GLubyte *src = (const GLubyte *)
         _mesa_image_address2d(unpack, bitmap, width, height,
                               GL_COLOR_INDEX, GL_BITMAP, row, 0);

      if (unpack->LsbFirst) {
         /* Lsb first */
         GLubyte mask = 1U << (unpack->SkipPixels & 0x7);
         for (col = 0; col < width; col++) {
            if (*src & mask) {
               span.array->x[count] = px + col;
               span.array->y[count] = py + row;
               count++;
            }
            if (mask == 128U) {
               src++;
               mask = 1U;
            }
            else {
               mask = mask << 1;
            }
         }

         /* get ready for next row */
         if (mask != 1)
            src++;
      }
      else {
         /* Msb first */
         GLubyte mask = 128U >> (unpack->SkipPixels & 0x7);
         for (col = 0; col < width; col++) {
            if (*src & mask) {
               span.array->x[count] = px + col;
               span.array->y[count] = py + row;
               count++;
            }
            if (mask == 1U) {
               src++;
               mask = 128U;
            }
            else {
               mask = mask >> 1;
            }
         }

         /* get ready for next row */
         if (mask != 128)
            src++;
      }

      if (count + width >= SWRAST_MAX_WIDTH || row + 1 == height) {
         /* flush the span */
         span.end = count;
         _swrast_write_rgba_span(ctx, &span);
         span.end = 0;
         count = 0;
      }
   }

   swrast_render_finish(ctx);

   _mesa_unmap_pbo_source(ctx, unpack);
}
/**
 * Software fallback for glBlitFramebufferEXT().
 */
void
_swrast_BlitFramebuffer(struct gl_context *ctx,
                        struct gl_framebuffer *readFb,
                        struct gl_framebuffer *drawFb,
                        GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                        GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                        GLbitfield mask, GLenum filter)
{
   static const GLbitfield buffers[3] = {
      GL_COLOR_BUFFER_BIT,
      GL_DEPTH_BUFFER_BIT,
      GL_STENCIL_BUFFER_BIT
   };
   static const GLenum buffer_enums[3] = {
      GL_COLOR,
      GL_DEPTH,
      GL_STENCIL,
   };
   GLint i;

   /* Page 679 of OpenGL 4.4 spec says:
    *    "Added BlitFramebuffer to commands affected by conditional rendering
    *     in section 10.10 (Bug 9562)."
    */
   if (!_mesa_check_conditional_render(ctx))
      return; /* Do not blit */

   if (!_mesa_clip_blit(ctx, readFb, drawFb,
                        &srcX0, &srcY0, &srcX1, &srcY1,
                        &dstX0, &dstY0, &dstX1, &dstY1)) {
      return;
   }

   if (SWRAST_CONTEXT(ctx)->NewState)
      _swrast_validate_derived(ctx);

   /* First, try covering whatever buffers possible using the fast 1:1 copy
    * path.
    */
   if (srcX1 - srcX0 == dstX1 - dstX0 &&
       srcY1 - srcY0 == dstY1 - dstY0 &&
       srcX0 < srcX1 &&
       srcY0 < srcY1 &&
       dstX0 < dstX1 &&
       dstY0 < dstY1) {
      for (i = 0; i < 3; i++) {
         if (mask & buffers[i]) {
            if (swrast_fast_copy_pixels(ctx, readFb, drawFb,
                                        srcX0, srcY0,
                                        srcX1 - srcX0, srcY1 - srcY0,
                                        dstX0, dstY0, buffer_enums[i])) {
               mask &= ~buffers[i];
            }
         }
      }

      if (!mask)
         return;
   }

   if (filter == GL_NEAREST) {
      for (i = 0; i < 3; i++) {
         if (mask & buffers[i]) {
            blit_nearest(ctx, readFb, drawFb,
                         srcX0, srcY0, srcX1, srcY1,
                         dstX0, dstY0, dstX1, dstY1, buffers[i]);
         }
      }
   }
   else {
      assert(filter == GL_LINEAR);
      if (mask & GL_COLOR_BUFFER_BIT) {  /* depth/stencil not allowed */
         blit_linear(ctx, readFb, drawFb,
                     srcX0, srcY0, srcX1, srcY1,
                     dstX0, dstY0, dstX1, dstY1);
      }
   }
}
/**
 * Called by ctx->Driver.Clear.
 */
static void
intelClear(struct gl_context *ctx, GLbitfield mask)
{
   struct intel_context *intel = intel_context(ctx);
   const GLuint colorMask = *((GLuint *) &ctx->Color.ColorMask[0]);
   GLbitfield tri_mask = 0;
   GLbitfield blit_mask = 0;
   GLbitfield swrast_mask = 0;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *irb;
   int i;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      intel->front_buffer_dirty = GL_TRUE;
   }

   if (0)
      fprintf(stderr, "%s\n", __FUNCTION__);

   /* Get SW clears out of the way: Anything without an intel_renderbuffer */
   for (i = 0; i < BUFFER_COUNT; i++) {
      if (!(mask & (1 << i)))
         continue;

      irb = intel_get_renderbuffer(fb, i);
      if (unlikely(!irb)) {
         swrast_mask |= (1 << i);
         mask &= ~(1 << i);
      }
   }
   if (unlikely(swrast_mask)) {
      debug_mask("swrast", swrast_mask);
      _swrast_Clear(ctx, swrast_mask);
   }

   /* HW color buffers (front, back, aux, generic FBO, etc) */
   if (intel->gen < 6 && colorMask == ~0) {
      /* clear all R,G,B,A */
      blit_mask |= (mask & BUFFER_BITS_COLOR);
   }
   else {
      /* glColorMask in effect */
      tri_mask |= (mask & BUFFER_BITS_COLOR);
   }

   /* Make sure we have up to date buffers before we start looking at
    * the tiling bits to determine how to clear.
    */
   intel_prepare_render(intel);

   /* HW stencil */
   if (mask & BUFFER_BIT_STENCIL) {
      const struct intel_region *stencilRegion =
         intel_get_rb_region(fb, BUFFER_STENCIL);
      if (stencilRegion) {
         /* have hw stencil */
         if (stencilRegion->tiling == I915_TILING_Y ||
             (ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
            /* We have to use the 3D engine if we're clearing a partial mask
             * of the stencil buffer, or if we're on a 965 which has a tiled
             * depth/stencil buffer in a layout we can't blit to.
             */
            tri_mask |= BUFFER_BIT_STENCIL;
         }
         else if (intel->has_separate_stencil &&
                  stencilRegion->tiling == I915_TILING_NONE) {
            /* The stencil buffer is actually W tiled, which the hardware
             * cannot blit to.
             */
            tri_mask |= BUFFER_BIT_STENCIL;
         }
         else {
            /* clearing all stencil bits, use blitting */
            blit_mask |= BUFFER_BIT_STENCIL;
         }
      }
   }

   /* HW depth */
   if (mask & BUFFER_BIT_DEPTH) {
      /* named depthRegion to avoid shadowing the renderbuffer "irb" above */
      const struct intel_region *depthRegion =
         intel_get_rb_region(fb, BUFFER_DEPTH);

      /* clear depth with whatever method is used for stencil (see above) */
      if (depthRegion->tiling == I915_TILING_Y ||
          (tri_mask & BUFFER_BIT_STENCIL))
         tri_mask |= BUFFER_BIT_DEPTH;
      else
         blit_mask |= BUFFER_BIT_DEPTH;
   }

   /* If we're doing a tri pass for depth/stencil, include a likely color
    * buffer with it.
    */
   if (mask & (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL)) {
      int color_bit = _mesa_ffs(mask & BUFFER_BITS_COLOR);
      if (color_bit != 0) {
         tri_mask |= blit_mask & (1 << (color_bit - 1));
         blit_mask &= ~(1 << (color_bit - 1));
      }
   }

   /* Anything left, just use tris */
   tri_mask |= mask & ~blit_mask;

   if (blit_mask) {
      debug_mask("blit", blit_mask);
      tri_mask |= intelClearWithBlit(ctx, blit_mask);
   }

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      if (ctx->Extensions.ARB_fragment_shader)
         _mesa_meta_glsl_Clear(&intel->ctx, tri_mask);
      else
         _mesa_meta_Clear(&intel->ctx, tri_mask);
   }
}