static boolean r300_is_simple_msaa_resolve(const struct pipe_blit_info *info)
{
    unsigned dst_width = u_minify(info->dst.resource->width0, info->dst.level);
    unsigned dst_height = u_minify(info->dst.resource->height0, info->dst.level);

    return info->src.resource->nr_samples > 1 &&
           info->dst.resource->nr_samples <= 1 &&
           info->dst.resource->format == info->src.resource->format &&
           info->dst.resource->format == info->dst.format &&
           info->src.resource->format == info->src.format &&
           !info->scissor_enable &&
           info->mask == PIPE_MASK_RGBA &&
           dst_width == info->src.resource->width0 &&
           dst_height == info->src.resource->height0 &&
           info->dst.box.x == 0 &&
           info->dst.box.y == 0 &&
           info->dst.box.width == dst_width &&
           info->dst.box.height == dst_height &&
           info->src.box.x == 0 &&
           info->src.box.y == 0 &&
           info->src.box.width == dst_width &&
           info->src.box.height == dst_height &&
           (r300_resource(info->dst.resource)->tex.microtile != RADEON_LAYOUT_LINEAR ||
            r300_resource(info->dst.resource)->tex.macrotile[info->dst.level] != RADEON_LAYOUT_LINEAR);
}
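/* For reference: u_minify() (from Mesa's util/u_math.h) computes a mip
 * level's dimension by halving the base size per level and clamping to 1.
 * A minimal sketch of the same logic (hypothetical helper, not part of
 * the driver): */
static unsigned minify_sketch(unsigned base, unsigned level)
{
    unsigned size = base >> level;
    return size ? size : 1; /* dimensions never shrink below 1 texel */
}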
static void get_rc_constant_state(
    float vec[4],
    struct r300_context * r300,
    struct rc_constant * constant)
{
    struct r300_textures_state* texstate = r300->textures_state.state;
    struct r300_resource *tex;

    assert(constant->Type == RC_CONSTANT_STATE);

    /* vec should either be (0, 0, 0, 1), which should be a relatively safe
     * RGBA or STRQ value, or it could be one of the RC_CONSTANT_STATE
     * state factors. */

    switch (constant->u.State[0]) {
        /* Factor for converting rectangle coords to
         * normalized coords. Should only show up on non-r500. */
        case RC_STATE_R300_TEXRECT_FACTOR:
            tex = r300_resource(texstate->sampler_views[constant->u.State[1]]->base.texture);
            vec[0] = 1.0 / tex->tex.width0;
            vec[1] = 1.0 / tex->tex.height0;
            vec[2] = 0;
            vec[3] = 1;
            break;

        case RC_STATE_R300_TEXSCALE_FACTOR:
            tex = r300_resource(texstate->sampler_views[constant->u.State[1]]->base.texture);
            /* Add a small number to the texture size to work around rounding errors in hw. */
            vec[0] = tex->b.b.width0  / (tex->tex.width0  + 0.001f);
            vec[1] = tex->b.b.height0 / (tex->tex.height0 + 0.001f);
            vec[2] = tex->b.b.depth0  / (tex->tex.depth0  + 0.001f);
            vec[3] = 1;
            break;

        case RC_STATE_R300_VIEWPORT_SCALE:
            vec[0] = r300->viewport.scale[0];
            vec[1] = r300->viewport.scale[1];
            vec[2] = r300->viewport.scale[2];
            vec[3] = 1;
            break;

        case RC_STATE_R300_VIEWPORT_OFFSET:
            vec[0] = r300->viewport.translate[0];
            vec[1] = r300->viewport.translate[1];
            vec[2] = r300->viewport.translate[2];
            vec[3] = 1;
            break;

        default:
            fprintf(stderr, "r300: Implementation error: "
                "Unknown RC_CONSTANT type %d\n", constant->u.State[0]);
            vec[0] = 0;
            vec[1] = 0;
            vec[2] = 0;
            vec[3] = 1;
    }
}
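/* Note on RC_STATE_R300_TEXRECT_FACTOR above: pre-r500 chips cannot sample
 * with unnormalized (texture-rectangle) coordinates, so the compiler
 * multiplies the incoming texel-space coordinates by this constant to
 * normalize them, e.g. for a 256x128 texture vec = (1/256, 1/128, 0, 1)
 * and (x, y) -> (x/256, y/128). */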
static void *
r300_buffer_transfer_map( struct pipe_context *pipe,
			  struct pipe_transfer *transfer )
{
    struct r300_context *r300 = r300_context(pipe);
    struct r300_screen *r300screen = r300_screen(pipe->screen);
    struct radeon_winsys *rws = r300screen->rws;
    struct r300_resource *rbuf = r300_resource(transfer->resource);
    uint8_t *map;
    enum pipe_transfer_usage usage;

    if (rbuf->b.b.user_ptr)
        return rbuf->b.b.user_ptr + transfer->box.x;
    if (rbuf->constant_buffer)
        return (uint8_t *) rbuf->constant_buffer + transfer->box.x;

    /* Buffers are never written to by the GPU, therefore mapping them
     * for read can be unsynchronized. */
    usage = transfer->usage;
    if (!(usage & PIPE_TRANSFER_WRITE)) {
       usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
    }

    map = rws->buffer_map(rbuf->cs_buf, r300->cs, usage);

    if (map == NULL)
        return NULL;

    return map + transfer->box.x;
}
void* r300_texture_transfer_map(struct pipe_context *ctx,
				struct pipe_transfer *transfer)
{
    struct r300_context *r300 = r300_context(ctx);
    struct radeon_winsys *rws = (struct radeon_winsys *)ctx->winsys;
    struct r300_transfer *r300transfer = r300_transfer(transfer);
    struct r300_resource *tex = r300_resource(transfer->resource);
    char *map;
    enum pipe_format format = tex->b.b.b.format;

    if (r300transfer->linear_texture) {
        /* The detiled texture is of the same size as the region being mapped
         * (no offset needed). */
        return rws->buffer_map(r300transfer->linear_texture->buf,
                               r300->cs,
                               transfer->usage);
    } else {
        /* Tiling is disabled. */
        map = rws->buffer_map(tex->buf, r300->cs,
                              transfer->usage);

        if (!map) {
            return NULL;
        }

        return map + r300_transfer(transfer)->offset +
            transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
            transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
    }
}
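/* Sanity check of the offset math above: for a plain 32-bit format the
 * block is 1x1 pixel and 4 bytes, so the mapping returns
 *   map + offset + box.y * stride + box.x * 4,
 * i.e. ordinary linear addressing. For a compressed format like DXT1
 * (4x4-pixel blocks, 8 bytes each) it becomes
 *   map + offset + (box.y / 4) * stride + (box.x / 4) * 8. */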
static boolean r300_hiz_clear_allowed(struct r300_context *r300)
{
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;

    return r300_resource(fb->zsbuf->texture)->tex.hiz_dwords[fb->zsbuf->u.tex.level] != 0;
}
static boolean r300_fast_zclear_allowed(struct r300_context *r300,
                                        unsigned clear_buffers)
{
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;

    return r300_resource(fb->zsbuf->texture)->tex.zmask_dwords[fb->zsbuf->u.tex.level] != 0;
}
static void r300_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
    struct r300_resource *rbuf = r300_resource(buf);

    align_free(rbuf->malloced_buffer);

    if (rbuf->buf)
        pb_reference(&rbuf->buf, NULL);

    FREE(rbuf);
}
void r300_texture_transfer_unmap(struct pipe_context *ctx,
				 struct pipe_transfer *transfer)
{
    struct radeon_winsys *rws = (struct radeon_winsys *)ctx->winsys;
    struct r300_transfer *r300transfer = r300_transfer(transfer);
    struct r300_resource *tex = r300_resource(transfer->resource);

    if (r300transfer->linear_texture) {
        rws->buffer_unmap(r300transfer->linear_texture->buf);
    } else {
        rws->buffer_unmap(tex->buf);
    }
}
static void r300_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
    struct r300_screen *r300screen = r300_screen(screen);
    struct r300_resource *rbuf = r300_resource(buf);

    if (rbuf->constant_buffer)
        FREE(rbuf->constant_buffer);

    if (rbuf->buf)
        pb_reference(&rbuf->buf, NULL);

    util_slab_free(&r300screen->pool_buffers, rbuf);
}
static void r300_render_draw_elements(struct vbuf_render* render,
                                      const ushort* indices,
                                      uint count)
{
    struct r300_render* r300render = r300_render(render);
    struct r300_context* r300 = r300render->r300;
    unsigned max_index = (r300->vbo->size - r300->draw_vbo_offset) /
                         (r300render->r300->vertex_info.size * 4) - 1;
    struct pipe_resource *index_buffer = NULL;
    unsigned index_buffer_offset;

    CS_LOCALS(r300);
    DBG(r300, DBG_DRAW, "r300: render_draw_elements (count: %d)\n", count);

    u_upload_data(r300->uploader, 0, count * 2, indices,
                  &index_buffer_offset, &index_buffer);
    if (!index_buffer) {
        return;
    }

    if (!r300_prepare_for_rendering(r300,
                                    PREP_EMIT_STATES |
                                    PREP_EMIT_VARRAYS_SWTCL | PREP_INDEXED,
                                    index_buffer, 12, 0, 0, -1)) {
        pipe_resource_reference(&index_buffer, NULL);
        return;
    }

    BEGIN_CS(12);
    OUT_CS_REG(R300_GA_COLOR_CONTROL,
               r300_provoking_vertex_fixes(r300, r300render->prim));
    OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max_index);

    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, 0);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
           r300render->hwprim);

    OUT_CS_PKT3(R300_PACKET3_INDX_BUFFER, 2);
    OUT_CS(R300_INDX_BUFFER_ONE_REG_WR | (R300_VAP_PORT_IDX0 >> 2));
    OUT_CS(index_buffer_offset);
    OUT_CS((count + 1) / 2);
    OUT_CS_RELOC(r300_resource(index_buffer));
    END_CS;

    pipe_resource_reference(&index_buffer, NULL);
}
void r300_texture_transfer_unmap(struct pipe_context *ctx,
				 struct pipe_transfer *transfer)
{
    struct radeon_winsys *rws = r300_context(ctx)->rws;
    struct r300_transfer *trans = r300_transfer(transfer);
    struct r300_resource *tex = r300_resource(transfer->resource);

    if (trans->linear_texture) {
        if (transfer->usage & PIPE_TRANSFER_WRITE) {
            r300_copy_into_tiled_texture(ctx, trans);
        }

        pipe_resource_reference(
            (struct pipe_resource**)&trans->linear_texture, NULL);
    }
    FREE(transfer);
}
void r300_emit_cmask_clear(struct r300_context *r300, unsigned size, void *state)
{
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    struct r300_resource *tex;
    CS_LOCALS(r300);

    tex = r300_resource(fb->cbufs[0]->texture);

    BEGIN_CS(size);
    OUT_CS_PKT3(R300_PACKET3_3D_CLEAR_CMASK, 2);
    OUT_CS(0);
    OUT_CS(tex->tex.cmask_dwords);
    OUT_CS(0);
    END_CS;

    /* Mark the current colorbuffer's CMASK as in use. */
    r300->cmask_in_use = TRUE;
    r300_mark_fb_state_dirty(r300, R300_CHANGED_CMASK_ENABLE);
}
void r300_emit_zmask_clear(struct r300_context *r300, unsigned size, void *state)
{
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    struct r300_resource *tex;
    CS_LOCALS(r300);

    tex = r300_resource(fb->zsbuf->texture);

    BEGIN_CS(size);
    OUT_CS_PKT3(R300_PACKET3_3D_CLEAR_ZMASK, 2);
    OUT_CS(0);
    OUT_CS(tex->tex.zmask_dwords[fb->zsbuf->u.tex.level]);
    OUT_CS(0);
    END_CS;

    /* Mark the current zbuffer's zmask as in use. */
    r300->zmask_in_use = TRUE;
    r300_mark_atom_dirty(r300, &r300->hyperz_state);
}
void r300_emit_textures_state(struct r300_context *r300,
                              unsigned size, void *state)
{
    struct r300_textures_state *allstate = (struct r300_textures_state*)state;
    struct r300_texture_sampler_state *texstate;
    struct r300_resource *tex;
    unsigned i;
    boolean has_us_format = r300->screen->caps.has_us_format;
    CS_LOCALS(r300);

    BEGIN_CS(size);
    OUT_CS_REG(R300_TX_ENABLE, allstate->tx_enable);

    for (i = 0; i < allstate->count; i++) {
        if ((1 << i) & allstate->tx_enable) {
            texstate = &allstate->regs[i];
            tex = r300_resource(allstate->sampler_views[i]->base.texture);

            OUT_CS_REG(R300_TX_FILTER0_0 + (i * 4), texstate->filter0);
            OUT_CS_REG(R300_TX_FILTER1_0 + (i * 4), texstate->filter1);
            OUT_CS_REG(R300_TX_BORDER_COLOR_0 + (i * 4),
                       texstate->border_color);

            OUT_CS_REG(R300_TX_FORMAT0_0 + (i * 4), texstate->format.format0);
            OUT_CS_REG(R300_TX_FORMAT1_0 + (i * 4), texstate->format.format1);
            OUT_CS_REG(R300_TX_FORMAT2_0 + (i * 4), texstate->format.format2);

            OUT_CS_REG(R300_TX_OFFSET_0 + (i * 4), texstate->format.tile_config);
            OUT_CS_RELOC(tex);

            if (has_us_format) {
                OUT_CS_REG(R500_US_FORMAT0_0 + (i * 4),
                           texstate->format.us_format0);
            }
        }
    }
    END_CS;
}
void r300_emit_vertex_arrays_swtcl(struct r300_context *r300, boolean indexed)
{
    CS_LOCALS(r300);

    DBG(r300, DBG_SWTCL, "r300: Preparing vertex buffer %p for render, "
            "vertex size %d\n", r300->vbo,
            r300->vertex_info.size);
    /* Set the pointer to our vertex buffer. The emitted values are this:
     * PACKET3 [3D_LOAD_VBPNTR]
     * COUNT   [1]
     * FORMAT  [size | stride << 8]
     * OFFSET  [offset into BO]
     * VBPNTR  [relocated BO]
     */
    BEGIN_CS(7);
    OUT_CS_PKT3(R300_PACKET3_3D_LOAD_VBPNTR, 3);
    OUT_CS(1 | (!indexed ? R300_VC_FORCE_PREFETCH : 0));
    OUT_CS(r300->vertex_info.size |
            (r300->vertex_info.size << 8));
    OUT_CS(r300->draw_vbo_offset);
    OUT_CS(0);
    OUT_CS_RELOC(r300_resource(r300->vbo));
    END_CS;
}
void r300_emit_hiz_clear(struct r300_context *r300, unsigned size, void *state)
{
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    struct r300_resource* tex;
    CS_LOCALS(r300);

    tex = r300_resource(fb->zsbuf->texture);

    BEGIN_CS(size);
    OUT_CS_REG(R300_ZB_ZCACHE_CTLSTAT,
        R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE |
        R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
    OUT_CS_PKT3(R300_PACKET3_3D_CLEAR_HIZ, 2);
    OUT_CS(0);
    OUT_CS(tex->tex.hiz_dwords[fb->zsbuf->u.tex.level]);
    OUT_CS(r300->hiz_clear_value);
    END_CS;

    /* Mark the current zbuffer's hiz ram as in use. */
    r300->hiz_in_use = TRUE;
    r300->hiz_func = HIZ_FUNC_NONE;
    r300_mark_atom_dirty(r300, &r300->hyperz_state);
}
static void r300_draw_elements_immediate(struct r300_context *r300,
                                         const struct pipe_draw_info *info)
{
    uint8_t *ptr1;
    uint16_t *ptr2;
    uint32_t *ptr4;
    unsigned index_size = r300->vbuf_mgr->index_buffer.index_size;
    unsigned i, count_dwords = index_size == 4 ? info->count :
                                                 (info->count + 1) / 2;
    CS_LOCALS(r300);

    /* Reserve 2 + count_dwords of CS space for r300_draw_elements_immediate.
     * Give up if the function fails. */
    if (!r300_prepare_for_rendering(r300,
            PREP_EMIT_STATES | PREP_VALIDATE_VBOS | PREP_EMIT_VARRAYS |
            PREP_INDEXED, NULL, 2+count_dwords, 0, info->index_bias, -1))
        return;

    r300_emit_draw_init(r300, info->mode, info->max_index);

    BEGIN_CS(2 + count_dwords);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, count_dwords);

    switch (index_size) {
    case 1:
        ptr1 = r300_resource(r300->vbuf_mgr->index_buffer.buffer)->b.user_ptr;
        ptr1 += info->start;

        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
               r300_translate_primitive(info->mode));

        if (info->index_bias && !r300->screen->caps.is_r500) {
            for (i = 0; i < info->count-1; i += 2)
                OUT_CS(((ptr1[i+1] + info->index_bias) << 16) |
                        (ptr1[i]   + info->index_bias));

            if (info->count & 1)
                OUT_CS(ptr1[i] + info->index_bias);
        } else {
            for (i = 0; i < info->count-1; i += 2)
                OUT_CS(((ptr1[i+1]) << 16) |
                        (ptr1[i]  ));

            if (info->count & 1)
                OUT_CS(ptr1[i]);
        }
        break;

    case 2:
        ptr2 = (uint16_t*)r300_resource(r300->vbuf_mgr->index_buffer.buffer)->b.user_ptr;
        ptr2 += info->start;

        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
               r300_translate_primitive(info->mode));

        if (info->index_bias && !r300->screen->caps.is_r500) {
            for (i = 0; i < info->count-1; i += 2)
                OUT_CS(((ptr2[i+1] + info->index_bias) << 16) |
                        (ptr2[i]   + info->index_bias));

            if (info->count & 1)
                OUT_CS(ptr2[i] + info->index_bias);
        } else {
            OUT_CS_TABLE(ptr2, count_dwords);
        }
        break;

    case 4:
        ptr4 = (uint32_t*)r300_resource(r300->vbuf_mgr->index_buffer.buffer)->b.user_ptr;
        ptr4 += info->start;

        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
               R300_VAP_VF_CNTL__INDEX_SIZE_32bit |
               r300_translate_primitive(info->mode));

        if (info->index_bias && !r300->screen->caps.is_r500) {
            for (i = 0; i < info->count; i++)
                OUT_CS(ptr4[i] + info->index_bias);
        } else {
            OUT_CS_TABLE(ptr4, count_dwords);
        }
        break;
    }
    END_CS;
}
static void get_external_state(
    struct r300_context* r300,
    struct r300_fragment_program_external_state* state)
{
    struct r300_textures_state *texstate = r300->textures_state.state;
    struct r300_rs_state *rs = r300->rs_state.state;
    unsigned i;

    state->frag_clamp = rs ? rs->rs.clamp_fragment_color : 0;

    for (i = 0; i < texstate->sampler_state_count; i++) {
        struct r300_sampler_state *s = texstate->sampler_states[i];
        struct r300_sampler_view *v = texstate->sampler_views[i];
        struct r300_resource *t;

        if (!s || !v) {
            continue;
        }

        t = r300_resource(v->base.texture);

        if (s->state.compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE) {
            state->unit[i].compare_mode_enabled = 1;

            /* Fortunately, no need to translate this. */
            state->unit[i].texture_compare_func = s->state.compare_func;
        }

        state->unit[i].non_normalized_coords = !s->state.normalized_coords;
        state->unit[i].convert_unorm_to_snorm =
                v->base.format == PIPE_FORMAT_RGTC1_SNORM ||
                v->base.format == PIPE_FORMAT_LATC1_SNORM;

        /* Pass texture swizzling to the compiler, some lowering passes need it. */
        if (v->base.format == PIPE_FORMAT_RGTC1_SNORM ||
            v->base.format == PIPE_FORMAT_LATC1_SNORM) {
            unsigned char swizzle[4];

            util_format_compose_swizzles(
                            util_format_description(v->base.format)->swizzle,
                            v->swizzle,
                            swizzle);

            state->unit[i].texture_swizzle =
                    RC_MAKE_SWIZZLE(swizzle[0], swizzle[1],
                                    swizzle[2], swizzle[3]);
        } else if (state->unit[i].compare_mode_enabled) {
            state->unit[i].texture_swizzle =
                RC_MAKE_SWIZZLE(v->swizzle[0], v->swizzle[1],
                                v->swizzle[2], v->swizzle[3]);
        }

        /* XXX this should probably take into account STR, not just S. */
        if (t->tex.is_npot) {
            switch (s->state.wrap_s) {
            case PIPE_TEX_WRAP_REPEAT:
                state->unit[i].wrap_mode = RC_WRAP_REPEAT;
                break;

            case PIPE_TEX_WRAP_MIRROR_REPEAT:
                state->unit[i].wrap_mode = RC_WRAP_MIRRORED_REPEAT;
                break;

            case PIPE_TEX_WRAP_MIRROR_CLAMP:
            case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
            case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
                state->unit[i].wrap_mode = RC_WRAP_MIRRORED_CLAMP;
                break;

            default:
                state->unit[i].wrap_mode = RC_WRAP_NONE;
            }

            if (t->b.b.b.target == PIPE_TEXTURE_3D)
                state->unit[i].clamp_and_scale_before_fetch = TRUE;
        }
    }
}
static void r300_draw_vbo(struct pipe_context* pipe,
                          const struct pipe_draw_info *dinfo)
{
    struct r300_context* r300 = r300_context(pipe);
    struct pipe_draw_info info = *dinfo;

    info.indexed = info.indexed && r300->vbuf_mgr->index_buffer.buffer;

    if (r300->skip_rendering ||
        !u_trim_pipe_prim(info.mode, &info.count)) {
        return;
    }

    r300_update_derived_state(r300);

    /* Start the vbuf manager and update buffers if needed. */
    if (u_vbuf_draw_begin(r300->vbuf_mgr, &info) & U_VBUF_BUFFERS_UPDATED) {
        r300->vertex_arrays_dirty = TRUE;
    }

    /* Draw. */
    if (info.indexed) {
        unsigned max_count = u_vbuf_draw_max_vertex_count(r300->vbuf_mgr);

        if (!max_count) {
           fprintf(stderr, "r300: Skipping a draw command. There is a buffer "
                   " which is too small to be used for rendering.\n");
           goto done;
        }

        if (max_count == ~0) {
           /* There are no per-vertex vertex elements. Use the hardware maximum. */
           max_count = 0xffffff;
        }

        info.max_index = max_count - 1;
        info.start += r300->vbuf_mgr->index_buffer.offset / r300->vbuf_mgr->index_buffer.index_size;

        if (info.instance_count <= 1) {
            if (info.count <= 8 &&
                r300_resource(r300->vbuf_mgr->index_buffer.buffer)->b.user_ptr) {
                r300_draw_elements_immediate(r300, &info);
            } else {
                r300_draw_elements(r300, &info, -1);
            }
        } else {
            r300_draw_elements_instanced(r300, &info);
        }
    } else {
        if (info.instance_count <= 1) {
            if (immd_is_good_idea(r300, info.count)) {
                r300_draw_arrays_immediate(r300, &info);
            } else {
                r300_draw_arrays(r300, &info, -1);
            }
        } else {
            r300_draw_arrays_instanced(r300, &info);
        }
    }

done:
    u_vbuf_draw_end(r300->vbuf_mgr);
}
boolean r300_emit_buffer_validate(struct r300_context *r300,
                                  boolean do_validate_vertex_buffers,
                                  struct pipe_resource *index_buffer)
{
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    struct r300_aa_state *aa = (struct r300_aa_state*)r300->aa_state.state;
    struct r300_textures_state *texstate =
        (struct r300_textures_state*)r300->textures_state.state;
    struct r300_resource *tex;
    unsigned i;
    boolean flushed = FALSE;

validate:
    if (r300->fb_state.dirty) {
        /* Color buffers... */
        for (i = 0; i < fb->nr_cbufs; i++) {
            if (!fb->cbufs[i])
                continue;
            tex = r300_resource(fb->cbufs[i]->texture);
            assert(tex && tex->buf && "cbuf is marked, but NULL!");
            r300->rws->cs_add_reloc(r300->cs, tex->cs_buf,
                                    RADEON_USAGE_READWRITE,
                                    r300_surface(fb->cbufs[i])->domain,
                                    tex->b.b.nr_samples > 1 ?
                                    RADEON_PRIO_COLOR_BUFFER_MSAA :
                                    RADEON_PRIO_COLOR_BUFFER);
        }
        /* ...depth buffer... */
        if (fb->zsbuf) {
            tex = r300_resource(fb->zsbuf->texture);
            assert(tex && tex->buf && "zsbuf is marked, but NULL!");
            r300->rws->cs_add_reloc(r300->cs, tex->cs_buf,
                                    RADEON_USAGE_READWRITE,
                                    r300_surface(fb->zsbuf)->domain,
                                    tex->b.b.nr_samples > 1 ?
                                    RADEON_PRIO_DEPTH_BUFFER_MSAA :
                                    RADEON_PRIO_DEPTH_BUFFER);
        }
    }
    /* The AA resolve buffer. */
    if (r300->aa_state.dirty) {
        if (aa->dest) {
            r300->rws->cs_add_reloc(r300->cs, aa->dest->cs_buf,
                                    RADEON_USAGE_WRITE,
                                    aa->dest->domain,
                                    RADEON_PRIO_COLOR_BUFFER);
        }
    }
    if (r300->textures_state.dirty) {
        /* ...textures... */
        for (i = 0; i < texstate->count; i++) {
            if (!(texstate->tx_enable & (1 << i))) {
                continue;
            }

            tex = r300_resource(texstate->sampler_views[i]->base.texture);
            r300->rws->cs_add_reloc(r300->cs, tex->cs_buf, RADEON_USAGE_READ,
                                    tex->domain, RADEON_PRIO_SHADER_TEXTURE_RO);
        }
    }
    /* ...occlusion query buffer... */
    if (r300->query_current)
        r300->rws->cs_add_reloc(r300->cs, r300->query_current->cs_buf,
                                RADEON_USAGE_WRITE, RADEON_DOMAIN_GTT,
                                RADEON_PRIO_MIN);
    /* ...vertex buffer for SWTCL path... */
    if (r300->vbo_cs)
        r300->rws->cs_add_reloc(r300->cs, r300->vbo_cs,
                                RADEON_USAGE_READ, RADEON_DOMAIN_GTT,
                                RADEON_PRIO_MIN);
    /* ...vertex buffers for HWTCL path... */
    if (do_validate_vertex_buffers && r300->vertex_arrays_dirty) {
        struct pipe_vertex_buffer *vbuf = r300->vertex_buffer;
        struct pipe_vertex_buffer *last = r300->vertex_buffer +
                                      r300->nr_vertex_buffers;
        struct pipe_resource *buf;

        for (; vbuf != last; vbuf++) {
            buf = vbuf->buffer;
            if (!buf)
                continue;

            r300->rws->cs_add_reloc(r300->cs, r300_resource(buf)->cs_buf,
                                    RADEON_USAGE_READ,
                                    r300_resource(buf)->domain,
                                    RADEON_PRIO_SHADER_BUFFER_RO);
        }
    }
    /* ...and index buffer for HWTCL path. */
    if (index_buffer)
        r300->rws->cs_add_reloc(r300->cs, r300_resource(index_buffer)->cs_buf,
                                RADEON_USAGE_READ,
                                r300_resource(index_buffer)->domain,
                                RADEON_PRIO_MIN);

    /* Now do the validation (flush is called inside cs_validate on failure). */
    if (!r300->rws->cs_validate(r300->cs)) {
        /* Ooops, an infinite loop, give up. */
        if (flushed)
            return FALSE;

        flushed = TRUE;
        goto validate;
    }

    return TRUE;
}
/* Clear currently bound buffers. */
static void r300_clear(struct pipe_context* pipe,
                       unsigned buffers,
                       const union pipe_color_union *color,
                       double depth,
                       unsigned stencil)
{
    /* My notes about Zbuffer compression:
     *
     * 1) The zbuffer must be micro-tiled and whole microtiles must be
     *    written if compression is enabled. If microtiling is disabled,
     *    it locks up.
     *
     * 2) There is ZMASK RAM which contains a compressed zbuffer.
     *    Each dword of the Z Mask contains compression information
     *    for 16 4x4 pixel tiles, that is 2 bits for each tile.
     *    On chips with 2 Z pipes, every other dword maps to a different
     *    pipe. On newer chipsets, there is a new compression mode
     *    with 8x8 pixel tiles per 2 bits.
     *
     * 3) The FASTFILL bit has nothing to do with filling. It only tells hw
     *    it should look in the ZMASK RAM first before fetching from a real
     *    zbuffer.
     *
     * 4) If a pixel is in a cleared state, ZB_DEPTHCLEARVALUE is returned
     *    during zbuffer reads instead of the value that is actually stored
     *    in the zbuffer memory. A pixel is in a cleared state when its ZMASK
     *    is equal to 0. Therefore, if you clear ZMASK with zeros, you may
     *    leave the zbuffer memory uninitialized, but then you must enable
     *    compression, so that the ZMASK RAM is actually used.
     *
     * 5) Each 4x4 (or 8x8) tile is automatically decompressed and recompressed
     *    during zbuffer updates. A special decompressing operation should be
     *    used to fully decompress a zbuffer, which basically just stores all
     *    compressed tiles in ZMASK to the zbuffer memory.
     *
     * 6) For a 16-bit zbuffer, compression causes a hang with one or
     *    two samples and should not be used.
     *
     * 7) FORCE_COMPRESSED_STENCIL_VALUE should be enabled for stencil clears
     *    to avoid needless decompression.
     *
     * 8) Fastfill must not be used if reading of compressed Z data is disabled
     *    and writing of compressed Z data is enabled (RD/WR_COMP_ENABLE),
     *    i.e. it cannot be used to compress the zbuffer.
     *
     * 9) ZB_CB_CLEAR does not interact with zbuffer compression in any way.
     *
     * - Marek
     */

    struct r300_context* r300 = r300_context(pipe);
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    struct r300_hyperz_state *hyperz =
        (struct r300_hyperz_state*)r300->hyperz_state.state;
    uint32_t width = fb->width;
    uint32_t height = fb->height;
    uint32_t hyperz_dcv = hyperz->zb_depthclearvalue;

    /* Use fast Z clear.
     * The zbuffer must be in micro-tiled mode, otherwise it locks up. */
    if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
        boolean zmask_clear, hiz_clear;

        /* If both depth and stencil are present, they must be cleared together. */
        if (fb->zsbuf->texture->format == PIPE_FORMAT_S8_UINT_Z24_UNORM &&
            (buffers & PIPE_CLEAR_DEPTHSTENCIL) != PIPE_CLEAR_DEPTHSTENCIL) {
            zmask_clear = FALSE;
            hiz_clear = FALSE;
        } else {
            zmask_clear = r300_fast_zclear_allowed(r300, buffers);
            hiz_clear = r300_hiz_clear_allowed(r300);
        }

        /* If we need Hyper-Z. */
        if (zmask_clear || hiz_clear) {
            /* Try to obtain the access to Hyper-Z buffers if we don't have one. */
            if (!r300->hyperz_enabled &&
                (r300->screen->caps.is_r500 || debug_get_option_hyperz())) {
                r300->hyperz_enabled =
                    r300->rws->cs_request_feature(r300->cs,
                                                RADEON_FID_R300_HYPERZ_ACCESS,
                                                TRUE);
                if (r300->hyperz_enabled) {
                   /* Need to emit HyperZ buffer regs for the first time. */
                   r300_mark_fb_state_dirty(r300, R300_CHANGED_HYPERZ_FLAG);
                }
            }

            /* Setup Hyper-Z clears. */
            if (r300->hyperz_enabled) {
                if (zmask_clear) {
                    hyperz_dcv = hyperz->zb_depthclearvalue =
                        r300_depth_clear_value(fb->zsbuf->format, depth, stencil);

                    r300_mark_atom_dirty(r300, &r300->zmask_clear);
                    r300_mark_atom_dirty(r300, &r300->gpu_flush);
                    buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
                }

                if (hiz_clear) {
                    r300->hiz_clear_value = r300_hiz_clear_value(depth);
                    r300_mark_atom_dirty(r300, &r300->hiz_clear);
                    r300_mark_atom_dirty(r300, &r300->gpu_flush);
                }
                r300->num_z_clears++;
            }
        }
    }

    /* Use fast color clear for an AA colorbuffer.
     * The CMASK is shared between all colorbuffers, so we use it
     * if there is only one colorbuffer bound. */
    if ((buffers & PIPE_CLEAR_COLOR) && fb->nr_cbufs == 1 && fb->cbufs[0] &&
        r300_resource(fb->cbufs[0]->texture)->tex.cmask_dwords) {
        /* Try to obtain the access to the CMASK if we don't have one. */
        if (!r300->cmask_access) {
            r300->cmask_access =
                r300->rws->cs_request_feature(r300->cs,
                                              RADEON_FID_R300_CMASK_ACCESS,
                                              TRUE);
        }

        /* Setup the clear. */
        if (r300->cmask_access) {
            /* Pair the resource with the CMASK to avoid other resources
             * accessing it. */
            if (!r300->screen->cmask_resource) {
                pipe_mutex_lock(r300->screen->cmask_mutex);
                /* Double checking (first unlocked, then locked). */
                if (!r300->screen->cmask_resource) {
                    /* Don't reference this, so that the texture can be
                     * destroyed while set in cmask_resource.
                     * Then in texture_destroy, we set cmask_resource to NULL. */
                    r300->screen->cmask_resource = fb->cbufs[0]->texture;
                }
                pipe_mutex_unlock(r300->screen->cmask_mutex);
            }

            if (r300->screen->cmask_resource == fb->cbufs[0]->texture) {
                r300_set_clear_color(r300, color);
                r300_mark_atom_dirty(r300, &r300->cmask_clear);
                r300_mark_atom_dirty(r300, &r300->gpu_flush);
                buffers &= ~PIPE_CLEAR_COLOR;
            }
        }
    }
    /* Enable CBZB clear. */
    else if (r300_cbzb_clear_allowed(r300, buffers)) {
        struct r300_surface *surf = r300_surface(fb->cbufs[0]);

        hyperz->zb_depthclearvalue =
                r300_depth_clear_cb_value(surf->base.format, color->f);

        width = surf->cbzb_width;
        height = surf->cbzb_height;

        r300->cbzb_clear = TRUE;
        r300_mark_fb_state_dirty(r300, R300_CHANGED_HYPERZ_FLAG);
    }

    /* Clear. */
    if (buffers) {
        /* Clear using the blitter. */
        r300_blitter_begin(r300, R300_CLEAR);
        util_blitter_clear(r300->blitter, width, height, 1,
                           buffers, color, depth, stencil);
        r300_blitter_end(r300);
    } else if (r300->zmask_clear.dirty ||
               r300->hiz_clear.dirty ||
               r300->cmask_clear.dirty) {
        /* Just clear zmask and hiz now, this does not use the standard draw
         * procedure. */
        /* Calculate zmask_clear and hiz_clear atom sizes. */
        unsigned dwords =
            r300->gpu_flush.size +
            (r300->zmask_clear.dirty ? r300->zmask_clear.size : 0) +
            (r300->hiz_clear.dirty ? r300->hiz_clear.size : 0) +
            (r300->cmask_clear.dirty ? r300->cmask_clear.size : 0) +
            r300_get_num_cs_end_dwords(r300);

        /* Reserve CS space. */
        if (dwords > (RADEON_MAX_CMDBUF_DWORDS - r300->cs->cdw)) {
            r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
        }

        /* Emit clear packets. */
        r300_emit_gpu_flush(r300, r300->gpu_flush.size, r300->gpu_flush.state);
        r300->gpu_flush.dirty = FALSE;

        if (r300->zmask_clear.dirty) {
            r300_emit_zmask_clear(r300, r300->zmask_clear.size,
                                  r300->zmask_clear.state);
            r300->zmask_clear.dirty = FALSE;
        }
        if (r300->hiz_clear.dirty) {
            r300_emit_hiz_clear(r300, r300->hiz_clear.size,
                                r300->hiz_clear.state);
            r300->hiz_clear.dirty = FALSE;
        }
        if (r300->cmask_clear.dirty) {
            r300_emit_cmask_clear(r300, r300->cmask_clear.size,
                                  r300->cmask_clear.state);
            r300->cmask_clear.dirty = FALSE;
        }
    } else {
        assert(0);
    }

    /* Disable CBZB clear. */
    if (r300->cbzb_clear) {
        r300->cbzb_clear = FALSE;
        hyperz->zb_depthclearvalue = hyperz_dcv;
        r300_mark_fb_state_dirty(r300, R300_CHANGED_HYPERZ_FLAG);
    }

    /* Enable fastfill and/or hiz.
     *
     * If we cleared zmask/hiz, it's in use now. The Hyper-Z state update
     * looks if zmask/hiz is in use and programs hardware accordingly. */
    if (r300->zmask_in_use || r300->hiz_in_use) {
        r300_mark_atom_dirty(r300, &r300->hyperz_state);
    }
}
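/* A back-of-the-envelope sketch of note 2 in r300_clear() above: one ZMASK
 * dword stores 2 bits of compression state for each of 16 tiles of 4x4
 * (or 8x8) pixels. The real values in tex.zmask_dwords also account for
 * the number of Z pipes and alignment, so this hypothetical helper is an
 * estimate only, not part of the driver: */
static unsigned zmask_dwords_estimate(unsigned width, unsigned height,
                                      unsigned tile_dim /* 4 or 8 */)
{
    unsigned tiles_x = (width + tile_dim - 1) / tile_dim;
    unsigned tiles_y = (height + tile_dim - 1) / tile_dim;

    /* 16 tiles at 2 bits each fill one 32-bit dword. */
    return (tiles_x * tiles_y + 15) / 16;
}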
static void r300_emit_draw_elements(struct r300_context *r300,
                                    struct pipe_resource* indexBuffer,
                                    unsigned indexSize,
                                    unsigned max_index,
                                    unsigned mode,
                                    unsigned start,
                                    unsigned count,
                                    uint16_t *imm_indices3)
{
    uint32_t count_dwords, offset_dwords;
    boolean alt_num_verts = count > 65535;
    CS_LOCALS(r300);

    if (count >= (1 << 24)) {
        fprintf(stderr, "r300: Got a huge number of vertices: %i, "
                "refusing to render (max_index: %i).\n", count, max_index);
        return;
    }

    DBG(r300, DBG_DRAW, "r300: Indexbuf of %u indices, max %u\n",
        count, max_index);

    r300_emit_draw_init(r300, mode, max_index);

    /* If start is odd, render the first triangle with indices embedded
     * in the command stream. This will increase start by 3 and make it
     * even. We can then proceed without a fallback. */
    if (indexSize == 2 && (start & 1) &&
        mode == PIPE_PRIM_TRIANGLES) {
        BEGIN_CS(4);
        OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, 2);
        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (3 << 16) |
               R300_VAP_VF_CNTL__PRIM_TRIANGLES);
        OUT_CS(imm_indices3[1] << 16 | imm_indices3[0]);
        OUT_CS(imm_indices3[2]);
        END_CS;

        start += 3;
        count -= 3;
        if (!count)
           return;
    }

    offset_dwords = indexSize * start / sizeof(uint32_t);

    BEGIN_CS(8 + (alt_num_verts ? 2 : 0));
    if (alt_num_verts) {
        OUT_CS_REG(R500_VAP_ALT_NUM_VERTICES, count);
    }
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, 0);
    if (indexSize == 4) {
        count_dwords = count;
        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
               R300_VAP_VF_CNTL__INDEX_SIZE_32bit |
               r300_translate_primitive(mode) |
               (alt_num_verts ? R500_VAP_VF_CNTL__USE_ALT_NUM_VERTS : 0));
    } else {
        count_dwords = (count + 1) / 2;
        OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (count << 16) |
               r300_translate_primitive(mode) |
               (alt_num_verts ? R500_VAP_VF_CNTL__USE_ALT_NUM_VERTS : 0));
    }

    OUT_CS_PKT3(R300_PACKET3_INDX_BUFFER, 2);
    OUT_CS(R300_INDX_BUFFER_ONE_REG_WR | (R300_VAP_PORT_IDX0 >> 2) |
           (0 << R300_INDX_BUFFER_SKIP_SHIFT));
    OUT_CS(offset_dwords << 2);
    OUT_CS(count_dwords);
    OUT_CS_RELOC(r300_resource(indexBuffer));
    END_CS;
}
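/* Worked example of the odd-start fixup above, assuming 16-bit indices:
 * INDX_BUFFER takes a dword offset (offset_dwords = indexSize * start / 4),
 * so a ushort index stream must begin at an even index. With start = 5,
 * the byte offset is 2 * 5 = 10, which is not dword-aligned. Emitting the
 * first triangle's three indices inline and advancing start by 3 gives
 * start = 8, byte offset 16 = dword 4, and the rest of the draw can read
 * the index buffer directly. */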
static void r300_draw_elements(struct r300_context *r300,
                               const struct pipe_draw_info *info,
                               int instance_id)
{
    struct pipe_resource *indexBuffer = r300->index_buffer.buffer;
    unsigned indexSize = r300->index_buffer.index_size;
    struct pipe_resource* orgIndexBuffer = indexBuffer;
    unsigned start = info->start;
    unsigned count = info->count;
    boolean alt_num_verts = r300->screen->caps.is_r500 &&
                            count > 65536;
    unsigned short_count;
    int buffer_offset = 0, index_offset = 0; /* for index bias emulation */
    uint16_t indices3[3];

    if (info->index_bias && !r300->screen->caps.is_r500) {
        r300_split_index_bias(r300, info->index_bias, &buffer_offset,
                              &index_offset);
    }

    r300_translate_index_buffer(r300, &r300->index_buffer, &indexBuffer,
                                &indexSize, index_offset, &start, count);

    /* Fallback for misaligned ushort indices. */
    if (indexSize == 2 && (start & 1) && indexBuffer) {
        /* If we got here, then orgIndexBuffer == indexBuffer. */
        uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->cs_buf,
                                              r300->cs,
                                              PIPE_TRANSFER_READ |
                                              PIPE_TRANSFER_UNSYNCHRONIZED);

        if (info->mode == PIPE_PRIM_TRIANGLES) {
           memcpy(indices3, ptr + start, 6);
        } else {
            /* Copy the mapped index buffer directly to the upload buffer.
             * The start index will be aligned simply from the fact that
             * every sub-buffer in the upload buffer is aligned. */
            r300_upload_index_buffer(r300, &indexBuffer, indexSize, &start,
                                     count, (uint8_t*)ptr);
        }
    } else {
        if (r300->index_buffer.user_buffer)
            r300_upload_index_buffer(r300, &indexBuffer, indexSize,
                                     &start, count,
                                     r300->index_buffer.user_buffer);
    }

    /* 19 dwords for emit_draw_elements. Give up if the function fails. */
    if (!r300_prepare_for_rendering(r300,
            PREP_EMIT_STATES | PREP_VALIDATE_VBOS | PREP_EMIT_VARRAYS |
            PREP_INDEXED, indexBuffer, 19, buffer_offset, info->index_bias,
            instance_id))
        goto done;

    if (alt_num_verts || count <= 65535) {
        r300_emit_draw_elements(r300, indexBuffer, indexSize,
                                info->max_index, info->mode, start, count,
                                indices3);
    } else {
        do {
            /* The maximum must be divisible by 4 and 3,
             * so that quad and triangle lists are split correctly.
             *
             * Strips, loops, and fans won't work. */
            short_count = MIN2(count, 65532);

            r300_emit_draw_elements(r300, indexBuffer, indexSize,
                                     info->max_index,
                                     info->mode, start, short_count, indices3);

            start += short_count;
            count -= short_count;

            /* 19 dwords for emit_draw_elements */
            if (count) {
                if (!r300_prepare_for_rendering(r300,
                        PREP_VALIDATE_VBOS | PREP_EMIT_VARRAYS | PREP_INDEXED,
                        indexBuffer, 19, buffer_offset, info->index_bias,
                        instance_id))
                    goto done;
            }
        } while (count);
    }

done:
    if (indexBuffer != orgIndexBuffer) {
        pipe_resource_reference( &indexBuffer, NULL );
    }
}
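/* Why short_count is capped at 65532 above: each sub-draw must end on a
 * primitive boundary, so the cap has to be divisible by both 3 (triangle
 * lists) and 4 (quad lists). 65532 = 3 * 21844 = 4 * 16383 is the largest
 * such value below the 16-bit limit. */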
static void r300_update_hyperz(struct r300_context* r300)
{
    struct r300_hyperz_state *z =
        (struct r300_hyperz_state*)r300->hyperz_state.state;
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    struct r300_dsa_state *dsa = r300->dsa_state.state;
    struct r300_resource *zstex =
            fb->zsbuf ? r300_resource(fb->zsbuf->texture) : NULL;

    z->gb_z_peq_config = 0;
    z->zb_bw_cntl = 0;
    z->sc_hyperz = R300_SC_HYPERZ_ADJ_2;
    z->flush = 0;

    if (r300->cbzb_clear) {
        z->zb_bw_cntl |= R300_ZB_CB_CLEAR_CACHE_LINE_WRITE_ONLY;
        return;
    }

    if (!zstex || !r300->hyperz_enabled)
        return;

    /* Set the size of ZMASK tiles. */
    if (zstex->tex.zcomp8x8[fb->zsbuf->u.tex.level]) {
        z->gb_z_peq_config |= R300_GB_Z_PEQ_CONFIG_Z_PEQ_SIZE_8_8;
    }

    /* R500-specific features and optimizations. */
    if (r300->screen->caps.is_r500) {
        z->zb_bw_cntl |= R500_PEQ_PACKING_ENABLE |
                         R500_COVERED_PTR_MASKING_ENABLE;
    }

    /* Setup decompression if needed. No other HyperZ setting is required. */
    if (r300->zmask_decompress) {
        z->zb_bw_cntl |= R300_FAST_FILL_ENABLE |
                         R300_RD_COMP_ENABLE;
        return;
    }

    /* Do not set anything if depth and stencil tests are off. */
    if (!dsa->dsa.depth.enabled &&
        !dsa->dsa.stencil[0].enabled &&
        !dsa->dsa.stencil[1].enabled) {
        assert(!dsa->dsa.depth.writemask);
        return;
    }

    /* Zbuffer compression. */
    if (r300->zmask_in_use && !r300->locked_zbuffer) {
        z->zb_bw_cntl |= R300_FAST_FILL_ENABLE |
                         R300_RD_COMP_ENABLE |
                         R300_WR_COMP_ENABLE;
    }

    /* HiZ. */
    if (r300->hiz_in_use && !r300->locked_zbuffer) {
        /* HiZ cannot be used under some circumstances. */
        if (!r300_hiz_allowed(r300)) {
            /* If writemask is disabled, the HiZ memory will not be changed,
             * so we can keep its content for later. */
            if (dsa->dsa.depth.writemask) {
                r300->hiz_in_use = FALSE;
            }
            return;
        }
        DBG(r300, DBG_HYPERZ, "r300: Z-func: %i\n", dsa->dsa.depth.func);

        /* Set the HiZ function if needed. */
        if (r300->hiz_func == HIZ_FUNC_NONE) {
            r300->hiz_func = r300_get_hiz_func(r300);
        }

        /* Setup the HiZ bits. */
        z->zb_bw_cntl |= R300_HIZ_ENABLE |
                (r300->hiz_func == HIZ_FUNC_MIN ? R300_HIZ_MIN : R300_HIZ_MAX);

        z->sc_hyperz |= R300_SC_HYPERZ_ENABLE |
                        r300_get_sc_hz_max(r300);

        if (r300->screen->caps.is_r500) {
            z->zb_bw_cntl |= R500_HIZ_EQUAL_REJECT_ENABLE;
        }
    }
}
/* Copy a block of pixels from one surface to another. */
static void r300_resource_copy_region(struct pipe_context *pipe,
                                      struct pipe_resource *dst,
                                      unsigned dst_level,
                                      unsigned dstx, unsigned dsty, unsigned dstz,
                                      struct pipe_resource *src,
                                      unsigned src_level,
                                      const struct pipe_box *src_box)
{
    struct pipe_screen *screen = pipe->screen;
    struct r300_context *r300 = r300_context(pipe);
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    unsigned src_width0 = r300_resource(src)->tex.width0;
    unsigned src_height0 = r300_resource(src)->tex.height0;
    unsigned dst_width0 = r300_resource(dst)->tex.width0;
    unsigned dst_height0 = r300_resource(dst)->tex.height0;
    unsigned layout;
    struct pipe_box box;
    struct pipe_sampler_view src_templ, *src_view;
    struct pipe_surface dst_templ, *dst_view;

    /* Fallback for buffers. */
    if ((dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) ||
        !r300_is_blit_supported(dst->format)) {
        util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                                  src, src_level, src_box);
        return;
    }

    /* The code below changes the texture format so that the copy can be done
     * on hardware. E.g. depth-stencil surfaces are copied as RGBA
     * colorbuffers. */

    util_blitter_default_dst_texture(&dst_templ, dst, dst_level, dstz, src_box);
    util_blitter_default_src_texture(&src_templ, src, src_level);

    layout = util_format_description(dst_templ.format)->layout;

    /* Handle non-renderable plain formats. */
    if (layout == UTIL_FORMAT_LAYOUT_PLAIN &&
        (!screen->is_format_supported(screen, src_templ.format, src->target,
                                      src->nr_samples,
                                      PIPE_BIND_SAMPLER_VIEW) ||
         !screen->is_format_supported(screen, dst_templ.format, dst->target,
                                      dst->nr_samples,
                                      PIPE_BIND_RENDER_TARGET))) {
        switch (util_format_get_blocksize(dst_templ.format)) {
            case 1:
                dst_templ.format = PIPE_FORMAT_I8_UNORM;
                break;
            case 2:
                dst_templ.format = PIPE_FORMAT_B4G4R4A4_UNORM;
                break;
            case 4:
                dst_templ.format = PIPE_FORMAT_B8G8R8A8_UNORM;
                break;
            case 8:
                dst_templ.format = PIPE_FORMAT_R16G16B16A16_UNORM;
                break;
            default:
                debug_printf("r300: copy_region: Unhandled format: %s. Falling back to software.\n"
                             "r300: copy_region: Software fallback doesn't work for tiled textures.\n",
                             util_format_short_name(dst_templ.format));
        }
        src_templ.format = dst_templ.format;
    }

    /* Handle compressed formats. */
    if (layout == UTIL_FORMAT_LAYOUT_S3TC ||
        layout == UTIL_FORMAT_LAYOUT_RGTC) {
        assert(src_templ.format == dst_templ.format);

        box = *src_box;
        src_box = &box;

        dst_width0 = align(dst_width0, 4);
        dst_height0 = align(dst_height0, 4);
        src_width0 = align(src_width0, 4);
        src_height0 = align(src_height0, 4);
        box.width = align(box.width, 4);
        box.height = align(box.height, 4);

        switch (util_format_get_blocksize(dst_templ.format)) {
        case 8:
            /* one 4x4 pixel block has 8 bytes.
             * we set 1 pixel = 4 bytes ===> 1 block corresponds to 2 pixels. */
            dst_templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
            dst_width0 = dst_width0 / 2;
            src_width0 = src_width0 / 2;
            dstx /= 2;
            box.x /= 2;
            box.width /= 2;
            break;
        case 16:
            /* one 4x4 pixel block has 16 bytes.
             * we set 1 pixel = 4 bytes ===> 1 block corresponds to 4 pixels. */
            dst_templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
            break;
        }
        src_templ.format = dst_templ.format;

        dst_height0 = dst_height0 / 4;
        src_height0 = src_height0 / 4;
        dsty /= 4;
        box.y /= 4;
        box.height /= 4;
    }

    /* Fallback for textures. */
    if (!screen->is_format_supported(screen, dst_templ.format,
                                     dst->target, dst->nr_samples,
                                     PIPE_BIND_RENDER_TARGET) ||
	!screen->is_format_supported(screen, src_templ.format,
                                     src->target, src->nr_samples,
                                     PIPE_BIND_SAMPLER_VIEW)) {
        assert(0 && "this shouldn't happen, update r300_is_blit_supported");
        util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                                  src, src_level, src_box);
        return;
    }

    /* Decompress ZMASK. */
    if (r300->zmask_in_use && !r300->locked_zbuffer) {
        if (fb->zsbuf->texture == src ||
            fb->zsbuf->texture == dst) {
            r300_decompress_zmask(r300);
        }
    }

    dst_view = r300_create_surface_custom(pipe, dst, &dst_templ, dst_width0, dst_height0);
    src_view = r300_create_sampler_view_custom(pipe, src, &src_templ, src_width0, src_height0);

    r300_blitter_begin(r300, R300_COPY);
    util_blitter_blit_generic(r300->blitter, dst_view, dstx, dsty,
                              abs(src_box->width), abs(src_box->height),
                              src_view, src_box,
                              src_width0, src_height0, PIPE_MASK_RGBAZS,
                              PIPE_TEX_FILTER_NEAREST, NULL, FALSE);
    r300_blitter_end(r300);

    pipe_surface_reference(&dst_view, NULL);
    pipe_sampler_view_reference(&src_view, NULL);
}
static void *
r300_buffer_transfer_map( struct pipe_context *context,
                          struct pipe_resource *resource,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer )
{
    struct r300_context *r300 = r300_context(context);
    struct radeon_winsys *rws = r300->screen->rws;
    struct r300_resource *rbuf = r300_resource(resource);
    struct pipe_transfer *transfer;
    uint8_t *map;

    transfer = util_slab_alloc(&r300->pool_transfers);
    transfer->resource = resource;
    transfer->level = level;
    transfer->usage = usage;
    transfer->box = *box;
    transfer->stride = 0;
    transfer->layer_stride = 0;

    if (rbuf->malloced_buffer) {
        *ptransfer = transfer;
        return rbuf->malloced_buffer + box->x;
    }

    if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
        !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        assert(usage & PIPE_TRANSFER_WRITE);

        /* Check if mapping this buffer would cause waiting for the GPU. */
        if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->cs_buf, RADEON_USAGE_READWRITE) ||
            !r300->rws->buffer_wait(rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
            unsigned i;
            struct pb_buffer *new_buf;

            /* Create a new one in the same pipe_resource. */
            new_buf = r300->rws->buffer_create(r300->rws, rbuf->b.b.width0,
                                               R300_BUFFER_ALIGNMENT, TRUE,
                                               rbuf->domain, 0);
            if (new_buf) {
                /* Discard the old buffer. */
                pb_reference(&rbuf->buf, NULL);
                rbuf->buf = new_buf;
                rbuf->cs_buf = r300->rws->buffer_get_cs_handle(rbuf->buf);

                /* We changed the buffer, so now we need to rebind it wherever the old one was bound. */
                for (i = 0; i < r300->nr_vertex_buffers; i++) {
                    if (r300->vertex_buffer[i].buffer == &rbuf->b.b) {
                        r300->vertex_arrays_dirty = TRUE;
                        break;
                    }
                }
            }
        }
    }

    /* Buffers are never written to by the GPU, therefore mapping them
     * for read can be unsynchronized. */
    if (!(usage & PIPE_TRANSFER_WRITE)) {
       usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
    }

    map = rws->buffer_map(rbuf->cs_buf, r300->cs, usage);

    if (map == NULL) {
        util_slab_free(&r300->pool_transfers, transfer);
        return NULL;
    }

    *ptransfer = transfer;
    return map + box->x;
}
void r300_emit_vertex_arrays(struct r300_context* r300, int offset,
                             boolean indexed, int instance_id)
{
    struct pipe_vertex_buffer *vbuf = r300->vertex_buffer;
    struct pipe_vertex_element *velem = r300->velems->velem;
    struct r300_resource *buf;
    int i;
    unsigned vertex_array_count = r300->velems->count;
    unsigned packet_size = (vertex_array_count * 3 + 1) / 2;
    struct pipe_vertex_buffer *vb1, *vb2;
    unsigned *hw_format_size = r300->velems->format_size;
    unsigned size1, size2, offset1, offset2, stride1, stride2;
    CS_LOCALS(r300);

    BEGIN_CS(2 + packet_size + vertex_array_count * 2);
    OUT_CS_PKT3(R300_PACKET3_3D_LOAD_VBPNTR, packet_size);
    OUT_CS(vertex_array_count | (!indexed ? R300_VC_FORCE_PREFETCH : 0));

    if (instance_id == -1) {
        /* Non-instanced arrays. This ignores instance_divisor and instance_id. */
        for (i = 0; i < vertex_array_count - 1; i += 2) {
            vb1 = &vbuf[velem[i].vertex_buffer_index];
            vb2 = &vbuf[velem[i+1].vertex_buffer_index];
            size1 = hw_format_size[i];
            size2 = hw_format_size[i+1];

            OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(vb1->stride) |
                   R300_VBPNTR_SIZE1(size2) | R300_VBPNTR_STRIDE1(vb2->stride));
            OUT_CS(vb1->buffer_offset + velem[i].src_offset   + offset * vb1->stride);
            OUT_CS(vb2->buffer_offset + velem[i+1].src_offset + offset * vb2->stride);
        }

        if (vertex_array_count & 1) {
            vb1 = &vbuf[velem[i].vertex_buffer_index];
            size1 = hw_format_size[i];

            OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(vb1->stride));
            OUT_CS(vb1->buffer_offset + velem[i].src_offset + offset * vb1->stride);
        }

        for (i = 0; i < vertex_array_count; i++) {
            buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer);
            OUT_CS_RELOC(buf);
        }
    } else {
        /* Instanced arrays. */
        for (i = 0; i < vertex_array_count - 1; i += 2) {
            vb1 = &vbuf[velem[i].vertex_buffer_index];
            vb2 = &vbuf[velem[i+1].vertex_buffer_index];
            size1 = hw_format_size[i];
            size2 = hw_format_size[i+1];

            if (velem[i].instance_divisor) {
                stride1 = 0;
                offset1 = vb1->buffer_offset + velem[i].src_offset +
                          (instance_id / velem[i].instance_divisor) * vb1->stride;
            } else {
                stride1 = vb1->stride;
                offset1 = vb1->buffer_offset + velem[i].src_offset + offset * vb1->stride;
            }
            if (velem[i+1].instance_divisor) {
                stride2 = 0;
                offset2 = vb2->buffer_offset + velem[i+1].src_offset +
                          (instance_id / velem[i+1].instance_divisor) * vb2->stride;
            } else {
                stride2 = vb2->stride;
                offset2 = vb2->buffer_offset + velem[i+1].src_offset + offset * vb2->stride;
            }

            OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(stride1) |
                   R300_VBPNTR_SIZE1(size2) | R300_VBPNTR_STRIDE1(stride2));
            OUT_CS(offset1);
            OUT_CS(offset2);
        }

        if (vertex_array_count & 1) {
            vb1 = &vbuf[velem[i].vertex_buffer_index];
            size1 = hw_format_size[i];

            if (velem[i].instance_divisor) {
                stride1 = 0;
                offset1 = vb1->buffer_offset + velem[i].src_offset +
                          (instance_id / velem[i].instance_divisor) * vb1->stride;
            } else {
                stride1 = vb1->stride;
                offset1 = vb1->buffer_offset + velem[i].src_offset + offset * vb1->stride;
            }

            OUT_CS(R300_VBPNTR_SIZE0(size1) | R300_VBPNTR_STRIDE0(stride1));
            OUT_CS(offset1);
        }

        for (i = 0; i < vertex_array_count; i++) {
            buf = r300_resource(vbuf[velem[i].vertex_buffer_index].buffer);
            OUT_CS_RELOC(buf);
        }
    }
    END_CS;
}
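
The packet_size arithmetic above is easy to misread, so here is an illustrative helper (not in the driver) that derives the same dword count from the 3D_LOAD_VBPNTR layout: each pair of arrays shares one size/stride dword and adds one offset dword per array, and an odd trailing array still needs its own size/stride dword.

/* Illustrative only: equivalent to (vertex_array_count * 3 + 1) / 2. */
static unsigned vbpntr_body_dwords(unsigned vertex_array_count)
{
    unsigned pairs = vertex_array_count / 2; /* 1 size/stride + 2 offsets */
    unsigned odd   = vertex_array_count & 1; /* 1 size/stride + 1 offset  */
    return pairs * 3 + odd * 2;
}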
Example #28
struct pipe_transfer*
r300_texture_get_transfer(struct pipe_context *ctx,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box)
{
    struct r300_context *r300 = r300_context(ctx);
    struct r300_resource *tex = r300_resource(texture);
    struct r300_transfer *trans;
    struct pipe_resource base;
    boolean referenced_cs, referenced_hw, blittable;
    const struct util_format_description *desc =
        util_format_description(texture->format);

    referenced_cs =
        r300->rws->cs_is_buffer_referenced(r300->cs, tex->cs_buf);
    if (referenced_cs) {
        referenced_hw = TRUE;
    } else {
        referenced_hw =
            r300->rws->buffer_is_busy(tex->buf, RADEON_USAGE_READWRITE);
    }

    blittable = desc->layout == UTIL_FORMAT_LAYOUT_PLAIN ||
                desc->layout == UTIL_FORMAT_LAYOUT_S3TC ||
                desc->layout == UTIL_FORMAT_LAYOUT_RGTC;

    trans = CALLOC_STRUCT(r300_transfer);
    if (trans) {
        /* Initialize the transfer object. */
        pipe_resource_reference(&trans->transfer.resource, texture);
        trans->transfer.level = level;
        trans->transfer.usage = usage;
        trans->transfer.box = *box;

        /* If the texture is tiled, we must create a temporary detiled texture
         * for this transfer.
         * Also make write transfers pipelined. */
        if (tex->tex.microtile || tex->tex.macrotile[level] ||
            (referenced_hw && blittable && !(usage & PIPE_TRANSFER_READ))) {
            if (r300->blitter->running) {
                fprintf(stderr, "r300: ERROR: Blitter recursion in texture_get_transfer.\n");
                os_break();
            }

            base.target = PIPE_TEXTURE_2D;
            base.format = texture->format;
            base.width0 = box->width;
            base.height0 = box->height;
            base.depth0 = 1;
            base.array_size = 1;
            base.last_level = 0;
            base.nr_samples = 0;
            base.usage = PIPE_USAGE_DYNAMIC;
            base.bind = 0;
            base.flags = R300_RESOURCE_FLAG_TRANSFER;

            /* For texture reading, the temporary (detiled) texture is used as
             * a render target when blitting from a tiled texture. */
            if (usage & PIPE_TRANSFER_READ) {
                base.bind |= PIPE_BIND_RENDER_TARGET;
            }
            /* For texture writing, the temporary texture is used as a sampler
             * when blitting into a tiled texture. */
            if (usage & PIPE_TRANSFER_WRITE) {
                base.bind |= PIPE_BIND_SAMPLER_VIEW;
            }

            /* Create the temporary texture. */
            trans->linear_texture = r300_resource(
               ctx->screen->resource_create(ctx->screen,
                                            &base));

            if (!trans->linear_texture) {
                /* Oh crap, the thing can't create the texture.
                 * Let's flush and try again. */
                r300_flush(ctx, 0, NULL);

                trans->linear_texture = r300_resource(
                   ctx->screen->resource_create(ctx->screen,
                                                &base));

                if (!trans->linear_texture) {
                    /* For linear textures, it's safe to fallback to
                     * an unpipelined transfer. */
                    if (!tex->tex.microtile && !tex->tex.macrotile[level]) {
                        goto unpipelined;
                    }

                    /* Otherwise, give up. */
                    fprintf(stderr,
                        "r300: Failed to create a transfer object.\n");
                    FREE(trans);
                    return NULL;
                }
            }

            assert(!trans->linear_texture->tex.microtile &&
                   !trans->linear_texture->tex.macrotile[0]);

            /* Set the stride. */
            trans->transfer.stride =
                    trans->linear_texture->tex.stride_in_bytes[0];

            if (usage & PIPE_TRANSFER_READ) {
                /* We cannot map a tiled texture directly because the data is
                 * in a different order, therefore we do detiling using a blit. */
                r300_copy_from_tiled_texture(ctx, trans);

                /* Always referenced in the blit. */
                r300_flush(ctx, 0, NULL);
            }
            return &trans->transfer;
        }

    unpipelined:
        /* Unpipelined transfer. */
        trans->transfer.stride = tex->tex.stride_in_bytes[level];
        trans->offset = r300_texture_get_offset(tex, level, box->z);

        if (referenced_cs &&
            !(usage & PIPE_TRANSFER_UNSYNCHRONIZED))
            r300_flush(ctx, 0, NULL);
        return &trans->transfer;
    }
    return NULL;
}
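
The branch condition above decides between the pipelined (staging) path and a direct map. Rewritten as a standalone predicate for clarity (a hypothetical restatement, not code from the driver):

/* Hypothetical restatement of the staging decision taken in
 * r300_texture_get_transfer above. */
static boolean r300_transfer_needs_staging(struct r300_resource *tex,
                                           unsigned level, unsigned usage,
                                           boolean referenced_hw,
                                           boolean blittable)
{
    /* Tiled data cannot be mapped directly; it must be detiled by a blit. */
    if (tex->tex.microtile || tex->tex.macrotile[level])
        return TRUE;

    /* A busy, blittable texture gets a staging copy for write-only
     * transfers so the CPU never waits for the GPU. */
    return referenced_hw && blittable && !(usage & PIPE_TRANSFER_READ);
}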
Example #29
static void r300_draw_arrays_immediate(struct r300_context *r300,
                                       const struct pipe_draw_info *info)
{
    struct pipe_vertex_element* velem;
    struct pipe_vertex_buffer* vbuf;
    unsigned vertex_element_count = r300->velems->count;
    unsigned i, v, vbi;

    /* Size of the vertex, in dwords. */
    unsigned vertex_size = r300->velems->vertex_size_dwords;

    /* The number of dwords for this draw operation. */
    unsigned dwords = 4 + info->count * vertex_size;

    /* Size of the vertex element, in dwords. */
    unsigned size[PIPE_MAX_ATTRIBS];

    /* Stride to the same attrib in the next vertex in the vertex buffer,
     * in dwords. */
    unsigned stride[PIPE_MAX_ATTRIBS];

    /* Mapped vertex buffers. */
    uint32_t* map[PIPE_MAX_ATTRIBS] = {0};
    uint32_t* mapelem[PIPE_MAX_ATTRIBS];

    CS_LOCALS(r300);

    if (!r300_prepare_for_rendering(r300, PREP_EMIT_STATES, NULL, dwords, 0, 0, -1))
        return;

    /* Calculate the vertex size, offsets, strides etc. and map the buffers. */
    for (i = 0; i < vertex_element_count; i++) {
        velem = &r300->velems->velem[i];
        size[i] = r300->velems->format_size[i] / 4;
        vbi = velem->vertex_buffer_index;
        vbuf = &r300->vertex_buffer[vbi];
        stride[i] = vbuf->stride / 4;

        /* Map the buffer. */
        if (!map[vbi]) {
            map[vbi] = (uint32_t*)r300->rws->buffer_map(
                r300_resource(vbuf->buffer)->cs_buf,
                r300->cs, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
            map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * info->start;
        }
        mapelem[i] = map[vbi] + (velem->src_offset / 4);
    }

    r300_emit_draw_init(r300, info->mode, info->count-1);

    BEGIN_CS(dwords);
    OUT_CS_REG(R300_VAP_VTX_SIZE, vertex_size);
    OUT_CS_PKT3(R300_PACKET3_3D_DRAW_IMMD_2, info->count * vertex_size);
    OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED | (info->count << 16) |
            r300_translate_primitive(info->mode));

    /* Emit vertices. */
    for (v = 0; v < info->count; v++) {
        for (i = 0; i < vertex_element_count; i++) {
            OUT_CS_TABLE(&mapelem[i][stride[i] * v], size[i]);
        }
    }
    END_CS;
}
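
The dword budget passed to BEGIN_CS above can be spelled out explicitly; a sketch (hypothetical helper, not in the driver):

/* Why dwords = 4 + count * vertex_size in the function above. */
static unsigned immd_draw_dwords(unsigned count, unsigned vertex_size)
{
    return 2                    /* OUT_CS_REG: header + VAP_VTX_SIZE value */
         + 1                    /* OUT_CS_PKT3: 3D_DRAW_IMMD_2 header      */
         + 1                    /* VAP_VF_CNTL draw-initiator dword        */
         + count * vertex_size; /* inlined vertex data                     */
}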
Example #30
void *
r300_texture_transfer_map(struct pipe_context *ctx,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **transfer)
{
    struct r300_context *r300 = r300_context(ctx);
    struct r300_resource *tex = r300_resource(texture);
    struct r300_transfer *trans;
    struct pipe_resource base;
    boolean referenced_cs, referenced_hw;
    enum pipe_format format = tex->b.b.format;
    char *map;

    referenced_cs =
        r300->rws->cs_is_buffer_referenced(r300->cs, tex->cs_buf, RADEON_USAGE_READWRITE);
    if (referenced_cs) {
        referenced_hw = TRUE;
    } else {
        referenced_hw =
            r300->rws->buffer_is_busy(tex->buf, RADEON_USAGE_READWRITE);
    }

    trans = CALLOC_STRUCT(r300_transfer);
    if (trans) {
        /* Initialize the transfer object. */
        trans->transfer.resource = texture;
        trans->transfer.level = level;
        trans->transfer.usage = usage;
        trans->transfer.box = *box;

        /* If the texture is tiled, we must create a temporary detiled texture
         * for this transfer.
         * Also make write transfers pipelined. */
        if (tex->tex.microtile || tex->tex.macrotile[level] ||
            (referenced_hw && !(usage & PIPE_TRANSFER_READ) &&
             r300_is_blit_supported(texture->format))) {
            if (r300->blitter->running) {
                fprintf(stderr, "r300: ERROR: Blitter recursion in texture_get_transfer.\n");
                os_break();
            }

            base.target = PIPE_TEXTURE_2D;
            base.format = texture->format;
            base.width0 = box->width;
            base.height0 = box->height;
            base.depth0 = 1;
            base.array_size = 1;
            base.last_level = 0;
            base.nr_samples = 0;
            base.usage = PIPE_USAGE_STAGING;
            base.bind = 0;
            base.flags = R300_RESOURCE_FLAG_TRANSFER;

            /* For texture reading, the temporary (detiled) texture is used as
             * a render target when blitting from a tiled texture. */
            if (usage & PIPE_TRANSFER_READ) {
                base.bind |= PIPE_BIND_RENDER_TARGET;
            }
            /* For texture writing, the temporary texture is used as a sampler
             * when blitting into a tiled texture. */
            if (usage & PIPE_TRANSFER_WRITE) {
                base.bind |= PIPE_BIND_SAMPLER_VIEW;
            }

            /* Create the temporary texture. */
            trans->linear_texture = r300_resource(
               ctx->screen->resource_create(ctx->screen,
                                            &base));

            if (!trans->linear_texture) {
                /* Oh crap, the thing can't create the texture.
                 * Let's flush and try again. */
                r300_flush(ctx, 0, NULL);

                trans->linear_texture = r300_resource(
                   ctx->screen->resource_create(ctx->screen,
                                                &base));

                if (!trans->linear_texture) {
                    fprintf(stderr,
                            "r300: Failed to create a transfer object.\n");
                    FREE(trans);
                    return NULL;
                }
            }

            assert(!trans->linear_texture->tex.microtile &&
                   !trans->linear_texture->tex.macrotile[0]);

            /* Set the stride. */
            trans->transfer.stride =
                    trans->linear_texture->tex.stride_in_bytes[0];

            if (usage & PIPE_TRANSFER_READ) {
                /* We cannot map a tiled texture directly because the data is
                 * in a different order, therefore we do detiling using a blit. */
                r300_copy_from_tiled_texture(ctx, trans);

                /* Always referenced in the blit. */
                r300_flush(ctx, 0, NULL);
            }
        } else {
            /* Unpipelined transfer. */
            trans->transfer.stride = tex->tex.stride_in_bytes[level];
            trans->offset = r300_texture_get_offset(tex, level, box->z);

            if (referenced_cs &&
                !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                r300_flush(ctx, 0, NULL);
            }
        }
    } else {
        /* CALLOC_STRUCT failed; bail out before trans is dereferenced. */
        return NULL;
    }

    if (trans->linear_texture) {
        /* The detiled texture is of the same size as the region being mapped
         * (no offset needed). */
        map = r300->rws->buffer_map(trans->linear_texture->cs_buf,
                                    r300->cs, usage);
        if (!map) {
            pipe_resource_reference(
                (struct pipe_resource**)&trans->linear_texture, NULL);
            FREE(trans);
            return NULL;
        }
        *transfer = &trans->transfer;
        return map;
    } else {
        /* Tiling is disabled. */
        map = r300->rws->buffer_map(tex->cs_buf, r300->cs, usage);
        if (!map) {
            FREE(trans);
            return NULL;
        }

        *transfer = &trans->transfer;
        return map + trans->offset +
            box->y / util_format_get_blockheight(format) * trans->transfer.stride +
            box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
    }
}
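
The return expression of the direct-map path converts the box origin from texel to block coordinates before applying the stride; an equivalent standalone sketch (hypothetical, not in the driver), using the util_format helpers:

#include "util/u_format.h"

/* Hypothetical restatement of the direct-map address computation above.
 * For plain formats block width/height are 1; for S3TC they are 4, so
 * the texel origin must first be converted to block coordinates. */
static char *transfer_map_address(char *map, unsigned offset, unsigned stride,
                                  enum pipe_format format,
                                  const struct pipe_box *box)
{
    return map + offset +
           box->y / util_format_get_blockheight(format) * stride +
           box->x / util_format_get_blockwidth(format) *
               util_format_get_blocksize(format);
}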