/**
 * Clip a quad against the window bounds, then pass it to the first
 * stage of the per-thread quad pipeline if any fragments survived.
 */
static INLINE void
clip_emit_quad( struct setup_context *setup, struct quad_header *quad,
                uint thread )
{
   struct softpipe_context *softpipe;

   quad_clip( setup, quad );

   if (!quad->inout.mask)
      return;   /* quad was clipped away entirely */

   softpipe = setup->softpipe;
   softpipe->quad[thread].first->run( softpipe->quad[thread].first, quad );
}
/** * Emit a quad (pass to next stage) with clipping. */ static inline void clip_emit_quad(struct setup_context *setup, struct quad_header *quad) { quad_clip(setup, quad); if (quad->inout.mask) { struct softpipe_context *sp = setup->softpipe; #if DEBUG_FRAGS setup->numFragsEmitted += util_bitcount(quad->inout.mask); #endif sp->quad.first->run( sp->quad.first, &quad, 1 ); } }
/** * Emit a quad (pass to next stage) with clipping. */ static INLINE void clip_emit_quad( struct setup_context *setup, struct quad_header *quad ) { quad_clip( setup, quad ); if (quad->inout.mask) { struct llvmpipe_context *lp = setup->llvmpipe; #if 1 /* XXX: The blender expects 4 quads. This is far from efficient, but * until we codegenerate single-quad variants of the fragment pipeline * we need this hack. */ const unsigned nr_quads = TILE_VECTOR_HEIGHT*TILE_VECTOR_WIDTH/QUAD_SIZE; struct quad_header quads[4]; struct quad_header *quad_ptrs[4]; int x0 = block_x(quad->input.x0); unsigned i; assert(nr_quads == 4); for(i = 0; i < nr_quads; ++i) { int x = x0 + 2*i; if(x == quad->input.x0) memcpy(&quads[i], quad, sizeof quads[i]); else { memset(&quads[i], 0, sizeof quads[i]); quads[i].input.x0 = x; quads[i].input.y0 = quad->input.y0; quads[i].coef = quad->coef; } quad_ptrs[i] = &quads[i]; } shade_quads( lp, quad_ptrs, nr_quads ); #else shade_quads( lp, &quad, 1 ); #endif } }