static void r300_simple_msaa_resolve(struct pipe_context *pipe,
                                     struct pipe_resource *dst,
                                     unsigned dst_level,
                                     unsigned dst_layer,
                                     struct pipe_resource *src,
                                     enum pipe_format format)
{
    struct r300_context *r300 = r300_context(pipe);
    struct r300_surface *srcsurf, *dstsurf;
    struct pipe_surface surf_tmpl;
    struct r300_aa_state *aa =
        (struct r300_aa_state*)r300->aa_state.state;

    memset(&surf_tmpl, 0, sizeof(surf_tmpl));
    surf_tmpl.format = format;
    srcsurf = r300_surface(pipe->create_surface(pipe, src, &surf_tmpl));

    surf_tmpl.format = format;
    surf_tmpl.u.tex.level = dst_level;
    surf_tmpl.u.tex.first_layer =
    surf_tmpl.u.tex.last_layer = dst_layer;
    dstsurf = r300_surface(pipe->create_surface(pipe, dst, &surf_tmpl));

    /* COLORPITCH should contain the tiling info of the resolve buffer.
     * The tiling of the AA buffer isn't programmable anyway. */
    srcsurf->pitch &= ~(R300_COLOR_TILE(1) | R300_COLOR_MICROTILE(3));
    srcsurf->pitch |= dstsurf->pitch & (R300_COLOR_TILE(1) |
                                        R300_COLOR_MICROTILE(3));

    /* Enable AA resolve. */
    aa->dest = dstsurf;
    r300->aa_state.size = 8;
    r300_mark_atom_dirty(r300, &r300->aa_state);

    /* Resolve the surface. */
    r300_blitter_begin(r300, R300_CLEAR_SURFACE);
    util_blitter_custom_color(r300->blitter, &srcsurf->base, NULL);
    r300_blitter_end(r300);

    /* Disable AA resolve. */
    aa->dest = NULL;
    r300->aa_state.size = 4;
    r300_mark_atom_dirty(r300, &r300->aa_state);

    pipe_surface_reference((struct pipe_surface**)&srcsurf, NULL);
    pipe_surface_reference((struct pipe_surface**)&dstsurf, NULL);
}
static boolean r300_cbzb_clear_allowed(struct r300_context *r300,
                                       unsigned clear_buffers)
{
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;

    /* Only color clear allowed, and only one colorbuffer. */
    if (clear_buffers != PIPE_CLEAR_COLOR || fb->nr_cbufs != 1)
        return FALSE;

    return r300_surface(fb->cbufs[0])->cbzb_allowed;
}
static void r300_resource_resolve(struct pipe_context *pipe,
                                  const struct pipe_resolve_info *info)
{
    struct r300_context *r300 = r300_context(pipe);
    struct pipe_surface *srcsurf, *dstsurf, surf_tmpl;
    struct r300_aa_state *aa =
        (struct r300_aa_state*)r300->aa_state.state;
    static const union pipe_color_union color;

    memset(&surf_tmpl, 0, sizeof(surf_tmpl));
    surf_tmpl.format = info->src.res->format;
    surf_tmpl.u.tex.first_layer =
    surf_tmpl.u.tex.last_layer = info->src.layer;
    srcsurf = pipe->create_surface(pipe, info->src.res, &surf_tmpl);
    /* XXX Offset both surfaces by x0,y1. */

    surf_tmpl.format = info->dst.res->format;
    surf_tmpl.u.tex.level = info->dst.level;
    surf_tmpl.u.tex.first_layer =
    surf_tmpl.u.tex.last_layer = info->dst.layer;
    dstsurf = pipe->create_surface(pipe, info->dst.res, &surf_tmpl);

    DBG(r300, DBG_DRAW, "r300: Resolving resource...\n");

    /* Enable AA resolve. */
    aa->dest = r300_surface(dstsurf);
    aa->aaresolve_ctl =
        R300_RB3D_AARESOLVE_CTL_AARESOLVE_MODE_RESOLVE |
        R300_RB3D_AARESOLVE_CTL_AARESOLVE_ALPHA_AVERAGE;
    r300->aa_state.size = 10;
    r300_mark_atom_dirty(r300, &r300->aa_state);

    /* Resolve the surface. */
    /* XXX: y1 < 0 ==> Y flip */
    r300->context.clear_render_target(pipe, srcsurf, &color,
                                      0, 0,
                                      info->dst.x1 - info->dst.x0,
                                      info->dst.y1 - info->dst.y0);

    /* Disable AA resolve. */
    aa->aaresolve_ctl = 0;
    r300->aa_state.size = 4;
    r300_mark_atom_dirty(r300, &r300->aa_state);

    pipe_surface_reference(&srcsurf, NULL);
    pipe_surface_reference(&dstsurf, NULL);
}
void r300_emit_blend_state(struct r300_context* r300,
                           unsigned size, void* state)
{
    struct r300_blend_state* blend = (struct r300_blend_state*)state;
    struct pipe_framebuffer_state* fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    CS_LOCALS(r300);

    if (fb->nr_cbufs) {
        if (fb->cbufs[0]->format == PIPE_FORMAT_R16G16B16A16_FLOAT) {
            WRITE_CS_TABLE(blend->cb_noclamp, size);
        } else {
            unsigned swz = r300_surface(fb->cbufs[0])->colormask_swizzle;
            WRITE_CS_TABLE(blend->cb_clamp[swz], size);
        }
    } else {
        WRITE_CS_TABLE(blend->cb_no_readwrite, size);
    }
}
void r300_emit_gpu_flush(struct r300_context *r300, unsigned size, void *state)
{
    struct r300_gpu_flush *gpuflush = (struct r300_gpu_flush*)state;
    struct pipe_framebuffer_state* fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    uint32_t height = fb->height;
    uint32_t width = fb->width;
    CS_LOCALS(r300);

    if (r300->cbzb_clear) {
        struct r300_surface *surf = r300_surface(fb->cbufs[0]);

        height = surf->cbzb_height;
        width = surf->cbzb_width;
    }

    DBG(r300, DBG_SCISSOR,
        "r300: Scissor width: %i, height: %i, CBZB clear: %s\n",
        width, height, r300->cbzb_clear ? "YES" : "NO");

    BEGIN_CS(size);

    /* Set up scissors.
     * By writing to the SC registers, SC & US assert idle. */
    OUT_CS_REG_SEQ(R300_SC_SCISSORS_TL, 2);
    if (r300->screen->caps.is_r500) {
        OUT_CS(0);
        OUT_CS(((width  - 1) << R300_SCISSORS_X_SHIFT) |
               ((height - 1) << R300_SCISSORS_Y_SHIFT));
    } else {
        OUT_CS((1440 << R300_SCISSORS_X_SHIFT) |
               (1440 << R300_SCISSORS_Y_SHIFT));
        OUT_CS(((width  + 1440-1) << R300_SCISSORS_X_SHIFT) |
               ((height + 1440-1) << R300_SCISSORS_Y_SHIFT));
    }

    /* Flush CB & ZB caches and wait until the 3D engine is idle and clean. */
    OUT_CS_TABLE(gpuflush->cb_flush_clean, 6);
    END_CS;
}
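/* Illustration (not part of the original source): on pre-r500 chips the
 * scissor registers above carry a +1440 coordinate bias; the top-left
 * corner is written as (1440, 1440) and the bottom-right as
 * (width + 1439, height + 1439). Presumably the bias keeps coordinates
 * left of or above the window representable in the unsigned register
 * fields; that rationale is an assumption, and only the arithmetic in
 * this hypothetical helper is taken from the code above. */
static inline uint32_t r300_biased_scissor_br(uint32_t width, uint32_t height)
{
    /* Bottom-right corner, inclusive, biased by 1440 as in
     * r300_emit_gpu_flush() on non-r500 chips. */
    return ((width  + 1440 - 1) << R300_SCISSORS_X_SHIFT) |
           ((height + 1440 - 1) << R300_SCISSORS_Y_SHIFT);
}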
static void r300_resource_resolve(struct pipe_context* pipe,
                                  struct pipe_resource* dest,
                                  struct pipe_subresource subdest,
                                  struct pipe_resource* src,
                                  struct pipe_subresource subsrc)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_aa_state *aa =
        (struct r300_aa_state*)r300->aa_state.state;
    struct pipe_surface* srcsurf =
        src->screen->get_tex_surface(src->screen,
                                     src, subsrc.face, subsrc.level, 0, 0);
    float color[] = {0, 0, 0, 0};

    DBG(r300, DBG_DRAW, "r300: Resolving resource...\n");

    /* Enable AA resolve. */
    aa->dest = r300_surface(
        dest->screen->get_tex_surface(dest->screen,
                                      dest, subdest.face, subdest.level,
                                      0, 0));
    aa->aaresolve_ctl =
        R300_RB3D_AARESOLVE_CTL_AARESOLVE_MODE_RESOLVE |
        R300_RB3D_AARESOLVE_CTL_AARESOLVE_ALPHA_AVERAGE;
    r300->aa_state.size = 12;
    r300->aa_state.dirty = TRUE;

    /* Resolve the surface. */
    r300->context.clear_render_target(pipe, srcsurf, color, 0, 0,
                                      src->width0, src->height0);

    /* Disable AA resolve. */
    aa->aaresolve_ctl = 0;
    r300->aa_state.size = 4;
    r300->aa_state.dirty = TRUE;

    pipe_surface_reference((struct pipe_surface**)&srcsurf, NULL);
    pipe_surface_reference((struct pipe_surface**)&aa->dest, NULL);
}
void r300_emit_blend_state(struct r300_context* r300,
                           unsigned size, void* state)
{
    struct r300_blend_state* blend = (struct r300_blend_state*)state;
    struct pipe_framebuffer_state* fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    struct pipe_surface *cb;
    CS_LOCALS(r300);

    cb = fb->nr_cbufs ? r300_get_nonnull_cb(fb, 0) : NULL;

    if (cb) {
        if (cb->format == PIPE_FORMAT_R16G16B16A16_FLOAT) {
            WRITE_CS_TABLE(blend->cb_noclamp, size);
        } else if (cb->format == PIPE_FORMAT_R16G16B16X16_FLOAT) {
            WRITE_CS_TABLE(blend->cb_noclamp_noalpha, size);
        } else {
            unsigned swz = r300_surface(cb)->colormask_swizzle;
            WRITE_CS_TABLE(blend->cb_clamp[swz], size);
        }
    } else {
        WRITE_CS_TABLE(blend->cb_no_readwrite, size);
    }
}
/* Clear currently bound buffers. */
static void r300_clear(struct pipe_context* pipe,
                       unsigned buffers,
                       const union pipe_color_union *color,
                       double depth,
                       unsigned stencil)
{
    /* My notes about Zbuffer compression:
     *
     * 1) The zbuffer must be micro-tiled and whole microtiles must be
     *    written if compression is enabled. If microtiling is disabled,
     *    it locks up.
     *
     * 2) There is ZMASK RAM which contains a compressed zbuffer.
     *    Each dword of the Z Mask contains compression information
     *    for 16 4x4 pixel tiles, that is 2 bits for each tile.
     *    On chips with 2 Z pipes, every other dword maps to a different
     *    pipe. On newer chipsets, there is a new compression mode
     *    with 8x8 pixel tiles per 2 bits.
     *
     * 3) The FASTFILL bit has nothing to do with filling. It only tells hw
     *    it should look in the ZMASK RAM first before fetching from a real
     *    zbuffer.
     *
     * 4) If a pixel is in a cleared state, ZB_DEPTHCLEARVALUE is returned
     *    during zbuffer reads instead of the value that is actually stored
     *    in the zbuffer memory. A pixel is in a cleared state when its ZMASK
     *    is equal to 0. Therefore, if you clear ZMASK with zeros, you may
     *    leave the zbuffer memory uninitialized, but then you must enable
     *    compression, so that the ZMASK RAM is actually used.
     *
     * 5) Each 4x4 (or 8x8) tile is automatically decompressed and
     *    recompressed during zbuffer updates. A special decompressing
     *    operation should be used to fully decompress a zbuffer, which
     *    basically just stores all compressed tiles in ZMASK to the zbuffer
     *    memory.
     *
     * 6) For a 16-bit zbuffer, compression causes a hang with one or
     *    two samples and should not be used.
     *
     * 7) FORCE_COMPRESSED_STENCIL_VALUE should be enabled for stencil clears
     *    to avoid needless decompression.
     *
     * 8) Fastfill must not be used if reading of compressed Z data is
     *    disabled and writing of compressed Z data is enabled
     *    (RD/WR_COMP_ENABLE), i.e. it cannot be used to compress the
     *    zbuffer.
     *
     * 9) ZB_CB_CLEAR does not interact with zbuffer compression in any way.
     *
     * - Marek
     */

    struct r300_context* r300 = r300_context(pipe);
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    struct r300_hyperz_state *hyperz =
        (struct r300_hyperz_state*)r300->hyperz_state.state;
    uint32_t width = fb->width;
    uint32_t height = fb->height;
    uint32_t hyperz_dcv = hyperz->zb_depthclearvalue;

    /* Enable fast Z clear.
     * The zbuffer must be in micro-tiled mode, otherwise it locks up. */
    if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
        boolean zmask_clear, hiz_clear;

        zmask_clear = r300_fast_zclear_allowed(r300);
        hiz_clear = r300_hiz_clear_allowed(r300);

        /* If we need Hyper-Z. */
        if (zmask_clear || hiz_clear) {
            r300->num_z_clears++;

            /* Try to obtain the access to Hyper-Z buffers if we don't
             * have one. */
            if (!r300->hyperz_enabled) {
                r300->hyperz_enabled =
                    r300->rws->cs_request_feature(r300->cs,
                                                  RADEON_FID_R300_HYPERZ_ACCESS,
                                                  TRUE);
                if (r300->hyperz_enabled) {
                    /* Need to emit HyperZ buffer regs for the first time. */
                    r300_mark_fb_state_dirty(r300, R300_CHANGED_HYPERZ_FLAG);
                }
            }

            /* Setup Hyper-Z clears. */
            if (r300->hyperz_enabled) {
                DBG(r300, DBG_HYPERZ, "r300: Clear memory: %s%s\n",
                    zmask_clear ? "ZMASK " : "", hiz_clear ? "HIZ" : "");

                if (zmask_clear) {
                    hyperz_dcv = hyperz->zb_depthclearvalue =
                        r300_depth_clear_value(fb->zsbuf->format,
                                               depth, stencil);

                    r300_mark_atom_dirty(r300, &r300->zmask_clear);
                    buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
                }

                if (hiz_clear) {
                    r300->hiz_clear_value = r300_hiz_clear_value(depth);
                    r300_mark_atom_dirty(r300, &r300->hiz_clear);
                }
            }
        }
    }

    /* Enable CBZB clear. */
    if (r300_cbzb_clear_allowed(r300, buffers)) {
        struct r300_surface *surf = r300_surface(fb->cbufs[0]);

        hyperz->zb_depthclearvalue =
            r300_depth_clear_cb_value(surf->base.format, color->f);

        width = surf->cbzb_width;
        height = surf->cbzb_height;

        r300->cbzb_clear = TRUE;
        r300_mark_fb_state_dirty(r300, R300_CHANGED_HYPERZ_FLAG);
    }

    /* Clear. */
    if (buffers) {
        enum pipe_format cformat =
            fb->nr_cbufs ? fb->cbufs[0]->format : PIPE_FORMAT_NONE;

        /* Clear using the blitter. */
        r300_blitter_begin(r300, R300_CLEAR);
        util_blitter_clear(r300->blitter, width, height, fb->nr_cbufs,
                           buffers, cformat, color, depth, stencil);
        r300_blitter_end(r300);
    } else if (r300->zmask_clear.dirty || r300->hiz_clear.dirty) {
        /* Just clear zmask and hiz now, this does not use the standard draw
         * procedure. */

        /* Calculate zmask_clear and hiz_clear atom sizes. */
        unsigned dwords =
            (r300->zmask_clear.dirty ? r300->zmask_clear.size : 0) +
            (r300->hiz_clear.dirty ? r300->hiz_clear.size : 0) +
            r300_get_num_cs_end_dwords(r300);

        /* Reserve CS space. */
        if (dwords > (RADEON_MAX_CMDBUF_DWORDS - r300->cs->cdw)) {
            r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
        }

        /* Emit clear packets. */
        if (r300->zmask_clear.dirty) {
            r300_emit_zmask_clear(r300, r300->zmask_clear.size,
                                  r300->zmask_clear.state);
            r300->zmask_clear.dirty = FALSE;
        }
        if (r300->hiz_clear.dirty) {
            r300_emit_hiz_clear(r300, r300->hiz_clear.size,
                                r300->hiz_clear.state);
            r300->hiz_clear.dirty = FALSE;
        }
    } else {
        assert(0);
    }

    /* Disable CBZB clear. */
    if (r300->cbzb_clear) {
        r300->cbzb_clear = FALSE;
        hyperz->zb_depthclearvalue = hyperz_dcv;
        r300_mark_fb_state_dirty(r300, R300_CHANGED_HYPERZ_FLAG);
    }

    /* Enable fastfill and/or hiz.
     *
     * If we cleared zmask/hiz, it's in use now. The Hyper-Z state update
     * looks if zmask/hiz is in use and programs hardware accordingly. */
    if (r300->zmask_in_use || r300->hiz_in_use) {
        r300_mark_atom_dirty(r300, &r300->hyperz_state);
    }
}
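/* Illustration (not part of the original source): note 2 in the comment
 * above says each ZMASK dword covers 16 4x4-pixel tiles at 2 bits per
 * tile. A minimal sketch of the resulting size arithmetic, with a
 * hypothetical helper name, ignoring the per-pipe dword interleaving and
 * any alignment the real driver has to respect: */
static unsigned zmask_dwords_for(unsigned width, unsigned height)
{
    unsigned tiles_x = (width  + 3) / 4;  /* 4x4 tiles per row */
    unsigned tiles_y = (height + 3) / 4;  /* 4x4 tiles per column */
    unsigned tiles   = tiles_x * tiles_y;

    return (tiles + 15) / 16;             /* 16 tiles (2 bits each) per dword */
}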
/* Clear currently bound buffers. */
static void r300_clear(struct pipe_context* pipe,
                       unsigned buffers,
                       const float* rgba,
                       double depth,
                       unsigned stencil)
{
    /* My notes about fastfill:
     *
     * 1) Only the zbuffer is cleared.
     *
     * 2) The zbuffer must be micro-tiled and whole microtiles must be
     *    written. If microtiling is disabled, it locks up.
     *
     * 3) There is Z Mask RAM which contains a compressed zbuffer and
     *    it interacts with fastfill. We should figure out how to use it
     *    to get more performance.
     *    This is what we know about the Z Mask:
     *
     *       Each dword of the Z Mask contains compression information
     *       for 16 4x4 pixel blocks, that is 2 bits for each block.
     *       On chips with 2 Z pipes, every other dword maps to a different
     *       pipe.
     *
     * 4) ZB_DEPTHCLEARVALUE is used to clear the zbuffer and the Z Mask must
     *    be equal to 0. (clear the Z Mask RAM with zeros)
     *
     * 5) For a 16-bit zbuffer, compression causes a hang with one or
     *    two samples and should not be used.
     *
     * 6) FORCE_COMPRESSED_STENCIL_VALUE should be enabled for stencil clears
     *    to avoid needless decompression.
     *
     * 7) Fastfill must not be used if reading of compressed Z data is
     *    disabled and writing of compressed Z data is enabled
     *    (RD/WR_COMP_ENABLE), i.e. it cannot be used to compress the
     *    zbuffer.
     *
     * 8) ZB_CB_CLEAR does not interact with fastfill in any way.
     *
     * - Marek
     */

    struct r300_context* r300 = r300_context(pipe);
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    struct r300_hyperz_state *hyperz =
        (struct r300_hyperz_state*)r300->hyperz_state.state;
    struct r300_texture *zstex =
        fb->zsbuf ? r300_texture(fb->zsbuf->texture) : NULL;
    uint32_t width = fb->width;
    uint32_t height = fb->height;
    boolean can_hyperz = r300->rws->get_value(r300->rws, R300_CAN_HYPERZ);
    uint32_t hyperz_dcv = hyperz->zb_depthclearvalue;

    /* Enable fast Z clear.
     * The zbuffer must be in micro-tiled mode, otherwise it locks up. */
    if ((buffers & PIPE_CLEAR_DEPTHSTENCIL) && can_hyperz) {
        hyperz_dcv = hyperz->zb_depthclearvalue =
            r300_depth_clear_value(fb->zsbuf->format, depth, stencil);

        r300_mark_fb_state_dirty(r300, R300_CHANGED_ZCLEAR_FLAG);
        if (zstex->zmask_mem[fb->zsbuf->u.tex.level]) {
            r300_mark_atom_dirty(r300, &r300->zmask_clear);
            buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
        }
        if (zstex->hiz_mem[fb->zsbuf->u.tex.level])
            r300_mark_atom_dirty(r300, &r300->hiz_clear);
    }

    /* Enable CBZB clear. */
    if (r300_cbzb_clear_allowed(r300, buffers)) {
        struct r300_surface *surf = r300_surface(fb->cbufs[0]);

        hyperz->zb_depthclearvalue =
            r300_depth_clear_cb_value(surf->base.format, rgba);

        width = surf->cbzb_width;
        height = surf->cbzb_height;

        r300->cbzb_clear = TRUE;
        r300_mark_fb_state_dirty(r300, R300_CHANGED_CBZB_FLAG);
    }

    /* Clear. */
    if (buffers) {
        /* Clear using the blitter. */
        r300_blitter_begin(r300, R300_CLEAR);
        util_blitter_clear(r300->blitter,
                           width,
                           height,
                           fb->nr_cbufs,
                           buffers, rgba, depth, stencil);
        r300_blitter_end(r300);
    } else if (r300->zmask_clear.dirty) {
        /* Just clear zmask and hiz now, this does not use a standard draw
         * procedure. */
        unsigned dwords;

        /* Calculate zmask_clear and hiz_clear atom sizes. */
        r300_update_hyperz_state(r300);
        dwords = r300->zmask_clear.size +
                 (r300->hiz_clear.dirty ? r300->hiz_clear.size : 0) +
                 r300_get_num_cs_end_dwords(r300);

        /* Reserve CS space. */
        if (dwords > (R300_MAX_CMDBUF_DWORDS - r300->cs->cdw)) {
            r300->context.flush(&r300->context, 0, NULL);
        }

        /* Emit clear packets. */
        r300_emit_zmask_clear(r300, r300->zmask_clear.size,
                              r300->zmask_clear.state);
        r300->zmask_clear.dirty = FALSE;

        if (r300->hiz_clear.dirty) {
            r300_emit_hiz_clear(r300, r300->hiz_clear.size,
                                r300->hiz_clear.state);
            r300->hiz_clear.dirty = FALSE;
        }
    } else {
        assert(0);
    }

    /* Disable CBZB clear. */
    if (r300->cbzb_clear) {
        r300->cbzb_clear = FALSE;
        hyperz->zb_depthclearvalue = hyperz_dcv;
        r300_mark_fb_state_dirty(r300, R300_CHANGED_CBZB_FLAG);
    }

    /* Enable fastfill and/or hiz.
     *
     * If we cleared zmask/hiz, it's in use now. The Hyper-Z state update
     * looks if zmask/hiz is in use and enables fastfill accordingly. */
    if (zstex &&
        (zstex->zmask_in_use[fb->zsbuf->u.tex.level] ||
         zstex->hiz_in_use[fb->zsbuf->u.tex.level])) {
        r300_mark_atom_dirty(r300, &r300->hyperz_state);
    }
}
void r300_emit_fb_state_pipelined(struct r300_context *r300,
                                  unsigned size, void *state)
{
    /* The sample coordinates are in the range [0,11], because
     * GB_TILE_CONFIG.SUBPIXEL is set to the 1/12 subpixel precision.
     *
     * Some sample coordinates reach to neighboring pixels and should not
     * be used. (e.g. Y=11)
     *
     * The unused samples must be set to the positions of other valid
     * samples. */
    static unsigned sample_locs_1x[12] = {
        6,6,  6,6,  6,6,  6,6,  6,6,  6,6
    };
    static unsigned sample_locs_2x[12] = {
        3,9,  9,3,  9,3,  9,3,  9,3,  9,3
    };
    static unsigned sample_locs_4x[12] = {
        4,4,  8,8,  2,10,  10,2,  10,2,  10,2
    };
    static unsigned sample_locs_6x[12] = {
        3,1,  7,3,  11,5,  1,7,  5,9,  9,10
    };

    struct pipe_framebuffer_state* fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    unsigned i, num_cbufs = fb->nr_cbufs;
    unsigned mspos0, mspos1;
    CS_LOCALS(r300);

    /* If we use the multiwrite feature, the colorbuffers 2,3,4 must be
     * marked as UNUSED in the US block. */
    if (r300->fb_multiwrite) {
        num_cbufs = MIN2(num_cbufs, 1);
    }

    BEGIN_CS(size);

    /* Colorbuffer format in the US block.
     * (must be written after unpipelined regs) */
    OUT_CS_REG_SEQ(R300_US_OUT_FMT_0, 4);
    for (i = 0; i < num_cbufs; i++) {
        OUT_CS(r300_surface(r300_get_nonnull_cb(fb, i))->format);
    }
    for (; i < 1; i++) {
        OUT_CS(R300_US_OUT_FMT_C4_8 |
               R300_C0_SEL_B | R300_C1_SEL_G |
               R300_C2_SEL_R | R300_C3_SEL_A);
    }
    for (; i < 4; i++) {
        OUT_CS(R300_US_OUT_FMT_UNUSED);
    }

    /* Set sample positions. It depends on the framebuffer sample count.
     * These are pipelined regs and as such cannot be moved to the AA
     * state. */
    switch (r300->num_samples) {
    default:
        mspos0 = r300_get_mspos(0, sample_locs_1x);
        mspos1 = r300_get_mspos(1, sample_locs_1x);
        break;
    case 2:
        mspos0 = r300_get_mspos(0, sample_locs_2x);
        mspos1 = r300_get_mspos(1, sample_locs_2x);
        break;
    case 4:
        mspos0 = r300_get_mspos(0, sample_locs_4x);
        mspos1 = r300_get_mspos(1, sample_locs_4x);
        break;
    case 6:
        mspos0 = r300_get_mspos(0, sample_locs_6x);
        mspos1 = r300_get_mspos(1, sample_locs_6x);
        break;
    }

    OUT_CS_REG_SEQ(R300_GB_MSPOS0, 2);
    OUT_CS(mspos0);
    OUT_CS(mspos1);
    END_CS;
}
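/* Illustration (not part of the original source): every entry in the
 * sample_locs_* tables above is an x or y coordinate on the 1/12
 * subpixel grid, so a hypothetical debug helper could verify the [0,11]
 * range the comment describes before a table is handed to
 * r300_get_mspos(). assert() is already used elsewhere in this file. */
static void r300_check_sample_locs(const unsigned locs[12])
{
    unsigned i;

    for (i = 0; i < 12; i++)
        assert(locs[i] <= 11); /* coordinates beyond 11 leave the pixel */
}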
void r300_emit_fb_state(struct r300_context* r300, unsigned size, void* state)
{
    struct pipe_framebuffer_state* fb = (struct pipe_framebuffer_state*)state;
    struct r300_surface* surf;
    unsigned i;
    uint32_t rb3d_cctl = 0;
    CS_LOCALS(r300);

    BEGIN_CS(size);

    if (r300->screen->caps.is_r500) {
        rb3d_cctl = R300_RB3D_CCTL_INDEPENDENT_COLORFORMAT_ENABLE_ENABLE;
    }
    /* NUM_MULTIWRITES replicates COLOR[0] to all colorbuffers. */
    if (fb->nr_cbufs && r300->fb_multiwrite) {
        rb3d_cctl |= R300_RB3D_CCTL_NUM_MULTIWRITES(fb->nr_cbufs);
    }
    if (r300->cmask_in_use) {
        rb3d_cctl |= R300_RB3D_CCTL_AA_COMPRESSION_ENABLE |
                     R300_RB3D_CCTL_CMASK_ENABLE;
    }

    OUT_CS_REG(R300_RB3D_CCTL, rb3d_cctl);

    /* Set up colorbuffers. */
    for (i = 0; i < fb->nr_cbufs; i++) {
        surf = r300_surface(r300_get_nonnull_cb(fb, i));

        OUT_CS_REG(R300_RB3D_COLOROFFSET0 + (4 * i), surf->offset);
        OUT_CS_RELOC(surf);

        OUT_CS_REG(R300_RB3D_COLORPITCH0 + (4 * i), surf->pitch);
        OUT_CS_RELOC(surf);

        if (r300->cmask_in_use && i == 0) {
            OUT_CS_REG(R300_RB3D_CMASK_OFFSET0, 0);
            OUT_CS_REG(R300_RB3D_CMASK_PITCH0, surf->pitch_cmask);
            OUT_CS_REG(R300_RB3D_COLOR_CLEAR_VALUE, r300->color_clear_value);
            if (r300->screen->caps.is_r500 &&
                r300->screen->info.drm_minor >= 29) {
                OUT_CS_REG_SEQ(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
                OUT_CS(r300->color_clear_value_ar);
                OUT_CS(r300->color_clear_value_gb);
            }
        }
    }

    /* Set up the ZB part of the CBZB clear. */
    if (r300->cbzb_clear) {
        surf = r300_surface(fb->cbufs[0]);

        OUT_CS_REG(R300_ZB_FORMAT, surf->cbzb_format);

        OUT_CS_REG(R300_ZB_DEPTHOFFSET, surf->cbzb_midpoint_offset);
        OUT_CS_RELOC(surf);

        OUT_CS_REG(R300_ZB_DEPTHPITCH, surf->cbzb_pitch);
        OUT_CS_RELOC(surf);

        DBG(r300, DBG_CBZB,
            "CBZB clearing cbuf %08x %08x\n", surf->cbzb_format,
            surf->cbzb_pitch);
    }
    /* Set up a zbuffer. */
    else if (fb->zsbuf) {
        surf = r300_surface(fb->zsbuf);

        OUT_CS_REG(R300_ZB_FORMAT, surf->format);

        OUT_CS_REG(R300_ZB_DEPTHOFFSET, surf->offset);
        OUT_CS_RELOC(surf);

        OUT_CS_REG(R300_ZB_DEPTHPITCH, surf->pitch);
        OUT_CS_RELOC(surf);

        if (r300->hyperz_enabled) {
            /* HiZ RAM. */
            OUT_CS_REG(R300_ZB_HIZ_OFFSET, 0);
            OUT_CS_REG(R300_ZB_HIZ_PITCH, surf->pitch_hiz);

            /* Z Mask RAM. (compressed zbuffer) */
            OUT_CS_REG(R300_ZB_ZMASK_OFFSET, 0);
            OUT_CS_REG(R300_ZB_ZMASK_PITCH, surf->pitch_zmask);
        }
    }

    END_CS;
}
boolean r300_emit_buffer_validate(struct r300_context *r300,
                                  boolean do_validate_vertex_buffers,
                                  struct pipe_resource *index_buffer)
{
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    struct r300_aa_state *aa = (struct r300_aa_state*)r300->aa_state.state;
    struct r300_textures_state *texstate =
        (struct r300_textures_state*)r300->textures_state.state;
    struct r300_resource *tex;
    unsigned i;
    boolean flushed = FALSE;

validate:
    if (r300->fb_state.dirty) {
        /* Color buffers... */
        for (i = 0; i < fb->nr_cbufs; i++) {
            if (!fb->cbufs[i])
                continue;
            tex = r300_resource(fb->cbufs[i]->texture);
            assert(tex && tex->buf && "cbuf is marked, but NULL!");
            r300->rws->cs_add_reloc(r300->cs, tex->cs_buf,
                                    RADEON_USAGE_READWRITE,
                                    r300_surface(fb->cbufs[i])->domain,
                                    tex->b.b.nr_samples > 1 ?
                                    RADEON_PRIO_COLOR_BUFFER_MSAA :
                                    RADEON_PRIO_COLOR_BUFFER);
        }
        /* ...depth buffer... */
        if (fb->zsbuf) {
            tex = r300_resource(fb->zsbuf->texture);
            assert(tex && tex->buf && "zsbuf is marked, but NULL!");
            r300->rws->cs_add_reloc(r300->cs, tex->cs_buf,
                                    RADEON_USAGE_READWRITE,
                                    r300_surface(fb->zsbuf)->domain,
                                    tex->b.b.nr_samples > 1 ?
                                    RADEON_PRIO_DEPTH_BUFFER_MSAA :
                                    RADEON_PRIO_DEPTH_BUFFER);
        }
    }
    /* The AA resolve buffer. */
    if (r300->aa_state.dirty) {
        if (aa->dest) {
            r300->rws->cs_add_reloc(r300->cs, aa->dest->cs_buf,
                                    RADEON_USAGE_WRITE,
                                    aa->dest->domain,
                                    RADEON_PRIO_COLOR_BUFFER);
        }
    }
    if (r300->textures_state.dirty) {
        /* ...textures... */
        for (i = 0; i < texstate->count; i++) {
            if (!(texstate->tx_enable & (1 << i))) {
                continue;
            }

            tex = r300_resource(texstate->sampler_views[i]->base.texture);
            r300->rws->cs_add_reloc(r300->cs, tex->cs_buf, RADEON_USAGE_READ,
                                    tex->domain,
                                    RADEON_PRIO_SHADER_TEXTURE_RO);
        }
    }
    /* ...occlusion query buffer... */
    if (r300->query_current)
        r300->rws->cs_add_reloc(r300->cs, r300->query_current->cs_buf,
                                RADEON_USAGE_WRITE, RADEON_DOMAIN_GTT,
                                RADEON_PRIO_MIN);
    /* ...vertex buffer for SWTCL path... */
    if (r300->vbo_cs)
        r300->rws->cs_add_reloc(r300->cs, r300->vbo_cs,
                                RADEON_USAGE_READ, RADEON_DOMAIN_GTT,
                                RADEON_PRIO_MIN);
    /* ...vertex buffers for HWTCL path... */
    if (do_validate_vertex_buffers && r300->vertex_arrays_dirty) {
        struct pipe_vertex_buffer *vbuf = r300->vertex_buffer;
        struct pipe_vertex_buffer *last = r300->vertex_buffer +
                                          r300->nr_vertex_buffers;
        struct pipe_resource *buf;

        for (; vbuf != last; vbuf++) {
            buf = vbuf->buffer;
            if (!buf)
                continue;

            r300->rws->cs_add_reloc(r300->cs, r300_resource(buf)->cs_buf,
                                    RADEON_USAGE_READ,
                                    r300_resource(buf)->domain,
                                    RADEON_PRIO_SHADER_BUFFER_RO);
        }
    }
    /* ...and index buffer for HWTCL path. */
    if (index_buffer)
        r300->rws->cs_add_reloc(r300->cs, r300_resource(index_buffer)->cs_buf,
                                RADEON_USAGE_READ,
                                r300_resource(index_buffer)->domain,
                                RADEON_PRIO_MIN);

    /* Now do the validation (flush is called inside cs_validate on
     * failure). */
    if (!r300->rws->cs_validate(r300->cs)) {
        /* Ooops, an infinite loop, give up. */
        if (flushed)
            return FALSE;

        flushed = TRUE;
        goto validate;
    }

    return TRUE;
}
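/* Illustration (not part of the original source): the function above is
 * a flush-and-retry-once pattern. cs_validate() flushes the CS
 * internally when the relocation list does not fit, so the relocations
 * must be recorded again before validating a second time, and a second
 * failure means this single draw cannot fit at all. A minimal sketch,
 * where 'record_relocs' is a hypothetical stand-in for the code under
 * the 'validate:' label: */
static boolean validate_with_one_retry(struct r300_context *r300,
                                       void (*record_relocs)(struct r300_context*))
{
    boolean flushed = FALSE;

    for (;;) {
        record_relocs(r300);                  /* add all relocations */
        if (r300->rws->cs_validate(r300->cs))
            return TRUE;                      /* everything fits */
        if (flushed)
            return FALSE;                     /* failed even after a flush */
        flushed = TRUE;                       /* CS was flushed; retry once */
    }
}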
void r300_emit_fb_state_pipelined(struct r300_context *r300,
                                  unsigned size, void *state)
{
    struct pipe_framebuffer_state* fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    unsigned i, num_cbufs = fb->nr_cbufs;
    unsigned mspos0, mspos1;
    CS_LOCALS(r300);

    /* If we use the multiwrite feature, the colorbuffers 2,3,4 must be
     * marked as UNUSED in the US block. */
    if (r300->fb_multiwrite) {
        num_cbufs = MIN2(num_cbufs, 1);
    }

    BEGIN_CS(size);

    /* Colorbuffer format in the US block.
     * (must be written after unpipelined regs) */
    OUT_CS_REG_SEQ(R300_US_OUT_FMT_0, 4);
    for (i = 0; i < num_cbufs; i++) {
        OUT_CS(r300_surface(fb->cbufs[i])->format);
    }
    for (; i < 1; i++) {
        OUT_CS(R300_US_OUT_FMT_C4_8 |
               R300_C0_SEL_B | R300_C1_SEL_G |
               R300_C2_SEL_R | R300_C3_SEL_A);
    }
    for (; i < 4; i++) {
        OUT_CS(R300_US_OUT_FMT_UNUSED);
    }

    /* Multisampling. Depends on framebuffer sample count.
     * These are pipelined regs and as such cannot be moved
     * to the AA state. */
    mspos0 = 0x66666666;
    mspos1 = 0x6666666;

    if (fb->nr_cbufs && fb->cbufs[0]->texture->nr_samples > 1) {
        /* Subsample placement. These may not be optimal. */
        switch (fb->cbufs[0]->texture->nr_samples) {
        case 2:
            mspos0 = 0x33996633;
            mspos1 = 0x6666663;
            break;
        case 3:
            mspos0 = 0x33936933;
            mspos1 = 0x6666663;
            break;
        case 4:
            mspos0 = 0x33939933;
            mspos1 = 0x3966663;
            break;
        case 6:
            mspos0 = 0x22a2aa22;
            mspos1 = 0x2a65672;
            break;
        default:
            debug_printf("r300: Bad number of multisamples!\n");
        }
    }

    OUT_CS_REG_SEQ(R300_GB_MSPOS0, 2);
    OUT_CS(mspos0);
    OUT_CS(mspos1);
    END_CS;
}
void r300_emit_fb_state(struct r300_context* r300, unsigned size, void* state)
{
    struct pipe_framebuffer_state* fb = (struct pipe_framebuffer_state*)state;
    struct r300_surface* surf;
    unsigned i;
    uint32_t rb3d_cctl = 0;
    CS_LOCALS(r300);

    BEGIN_CS(size);

    /* NUM_MULTIWRITES replicates COLOR[0] to all colorbuffers, which is not
     * what we usually want. */
    if (r300->screen->caps.is_r500) {
        rb3d_cctl = R300_RB3D_CCTL_INDEPENDENT_COLORFORMAT_ENABLE_ENABLE;
    }
    if (fb->nr_cbufs && r300->fb_multiwrite) {
        rb3d_cctl |= R300_RB3D_CCTL_NUM_MULTIWRITES(fb->nr_cbufs);
    }

    OUT_CS_REG(R300_RB3D_CCTL, rb3d_cctl);

    /* Set up colorbuffers. */
    for (i = 0; i < fb->nr_cbufs; i++) {
        surf = r300_surface(fb->cbufs[i]);

        OUT_CS_REG(R300_RB3D_COLOROFFSET0 + (4 * i), surf->offset);
        OUT_CS_RELOC(surf);

        OUT_CS_REG(R300_RB3D_COLORPITCH0 + (4 * i), surf->pitch);
        OUT_CS_RELOC(surf);
    }

    /* Set up the ZB part of the CBZB clear. */
    if (r300->cbzb_clear) {
        surf = r300_surface(fb->cbufs[0]);

        OUT_CS_REG(R300_ZB_FORMAT, surf->cbzb_format);

        OUT_CS_REG(R300_ZB_DEPTHOFFSET, surf->cbzb_midpoint_offset);
        OUT_CS_RELOC(surf);

        OUT_CS_REG(R300_ZB_DEPTHPITCH, surf->cbzb_pitch);
        OUT_CS_RELOC(surf);

        DBG(r300, DBG_CBZB,
            "CBZB clearing cbuf %08x %08x\n", surf->cbzb_format,
            surf->cbzb_pitch);
    }
    /* Set up a zbuffer. */
    else if (fb->zsbuf) {
        surf = r300_surface(fb->zsbuf);

        OUT_CS_REG(R300_ZB_FORMAT, surf->format);

        OUT_CS_REG(R300_ZB_DEPTHOFFSET, surf->offset);
        OUT_CS_RELOC(surf);

        OUT_CS_REG(R300_ZB_DEPTHPITCH, surf->pitch);
        OUT_CS_RELOC(surf);

        if (r300->hyperz_enabled) {
            /* HiZ RAM. */
            OUT_CS_REG(R300_ZB_HIZ_OFFSET, 0);
            OUT_CS_REG(R300_ZB_HIZ_PITCH, surf->pitch_hiz);

            /* Z Mask RAM. (compressed zbuffer) */
            OUT_CS_REG(R300_ZB_ZMASK_OFFSET, 0);
            OUT_CS_REG(R300_ZB_ZMASK_PITCH, surf->pitch_zmask);
        }
    }

    END_CS;
}