Example #1
/**
 * Check if the requested number of dwords is available in the CS and
 * if not, flush.
 * \param r300          The context.
 * \param flags         See r300_prepare_flags.
 * \param cs_dwords     The number of dwords to reserve in CS.
 * \return TRUE if the CS was flushed
 */
static boolean r300_reserve_cs_dwords(struct r300_context *r300,
                                      enum r300_prepare_flags flags,
                                      unsigned cs_dwords)
{
    boolean flushed        = FALSE;
    boolean emit_states    = flags & PREP_EMIT_STATES;
    boolean emit_vertex_arrays       = flags & PREP_EMIT_VARRAYS;
    boolean emit_vertex_arrays_swtcl = flags & PREP_EMIT_VARRAYS_SWTCL;

    /* Add dirty state, index offset, and AOS. */
    if (emit_states)
        cs_dwords += r300_get_num_dirty_dwords(r300);

    if (r300->screen->caps.is_r500)
        cs_dwords += 2; /* emit_index_offset */

    if (emit_vertex_arrays)
        cs_dwords += 55; /* emit_vertex_arrays */

    if (emit_vertex_arrays_swtcl)
        cs_dwords += 7; /* emit_vertex_arrays_swtcl */

    cs_dwords += r300_get_num_cs_end_dwords(r300);

    /* Reserve requested CS space. */
    if (cs_dwords > (RADEON_MAX_CMDBUF_DWORDS - r300->cs->cdw)) {
        r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
        flushed = TRUE;
    }

    return flushed;
}
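
The reservation helper above is typically called at the top of a draw path before any packets are emitted. A minimal sketch of that pattern follows; the caller name and the draw-packet dword count are assumptions for illustration, while r300_reserve_cs_dwords and the PREP_* flags are the real identifiers from the code above.

/* Hypothetical sketch (not upstream code): reserve CS space before a draw. */
static void example_prepare_for_draw(struct r300_context *r300)
{
    /* Ask for room for dirty states, vertex arrays and the draw packet. */
    boolean flushed = r300_reserve_cs_dwords(r300,
                                             PREP_EMIT_STATES | PREP_EMIT_VARRAYS,
                                             16 /* assumed draw-packet size */);

    if (flushed) {
        /* The flush emptied the CS, so previously validated state is dirty
         * again and must be re-emitted before the draw packet goes out. */
    }
}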
Example #2
static void r300_flush_callback(void *data, unsigned flags,
                                struct pipe_fence_handle **fence)
{
    struct r300_context* const cs_context_copy = data;

    r300_flush(&cs_context_copy->context, flags, fence);
}
Example #3
static void r300_flush_wrapped(struct pipe_context *pipe,
                               struct pipe_fence_handle **fence,
                               unsigned flags)
{
    r300_flush(pipe,
               flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0,
               fence);
}
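
This wrapper matches the pipe_context::flush signature of this Mesa branch and is presumably installed on the context during initialization; a short illustrative sketch (the init-function name is an assumption):

/* Illustrative sketch (assumed function name): install the wrapper as the
 * context's flush hook so frontends reach r300_flush() through it. */
static void example_init_flush_hook(struct r300_context *r300)
{
    r300->context.flush = r300_flush_wrapped;
}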
Example #4
/* Copy a detiled texture to a tiled one. */
static void r300_copy_into_tiled_texture(struct pipe_context *ctx,
                                         struct r300_transfer *r300transfer)
{
    struct pipe_transfer *transfer = (struct pipe_transfer*)r300transfer;
    struct pipe_resource *tex = transfer->resource;
    struct pipe_box src_box;
    u_box_origin_2d(transfer->box.width, transfer->box.height, &src_box);

    ctx->resource_copy_region(ctx, tex, transfer->level,
                              transfer->box.x, transfer->box.y, transfer->box.z,
                              &r300transfer->linear_texture->b.b.b, 0, &src_box);

    /* XXX remove this. */
    r300_flush(ctx, 0, NULL);
}
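
The copy-back above is presumably invoked when a write transfer that used a temporary linear texture is torn down. A hedged sketch of that unmap path, built from the r300_transfer fields visible in the later examples (the function name is illustrative, not verbatim upstream code):

/* Hypothetical sketch: on unmap, retile the data written to the staging
 * texture, then drop the temporary resource and the transfer object. */
static void example_transfer_unmap(struct pipe_context *ctx,
                                   struct pipe_transfer *transfer)
{
    struct r300_transfer *trans = (struct r300_transfer*)transfer;

    if (trans->linear_texture) {
        if (transfer->usage & PIPE_TRANSFER_WRITE) {
            /* Blit the linear staging copy back into the tiled texture. */
            r300_copy_into_tiled_texture(ctx, trans);
        }
        pipe_resource_reference(
            (struct pipe_resource**)&trans->linear_texture, NULL);
    }
    pipe_resource_reference(&transfer->resource, NULL);
    FREE(trans);
}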
Example #5
File: r300_query.c    Project: alesegdia/mesa
static void r300_end_query(struct pipe_context* pipe,
                           struct pipe_query* query)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_query *q = r300_query(query);

    if (q->type == PIPE_QUERY_GPU_FINISHED) {
        pb_reference(&q->buf, NULL);
        r300_flush(pipe, RADEON_FLUSH_ASYNC,
                   (struct pipe_fence_handle**)&q->buf);
        return;
    }

    if (q != r300->query_current) {
        fprintf(stderr, "r300: end_query: Got invalid query.\n");
        assert(0);
        return;
    }

    r300_stop_query(r300);
}
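
For the PIPE_QUERY_GPU_FINISHED path above, the fence stored in q->buf is what a frontend later waits on through get_query_result. A rough usage sketch follows; the Gallium hook signatures are assumed for this era of the API and may differ between branches.

/* Hypothetical sketch (not upstream code): drive the GPU_FINISHED query. */
static boolean example_wait_for_gpu(struct pipe_context *pipe)
{
    union pipe_query_result result;
    struct pipe_query *q = pipe->create_query(pipe, PIPE_QUERY_GPU_FINISHED);

    pipe->end_query(pipe, q);                       /* reaches r300_end_query() */
    pipe->get_query_result(pipe, q, TRUE, &result); /* TRUE = block until done */
    pipe->destroy_query(pipe, q);

    return result.b;
}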
Example #6
/* Clear currently bound buffers. */
static void r300_clear(struct pipe_context* pipe,
                       unsigned buffers,
                       const union pipe_color_union *color,
                       double depth,
                       unsigned stencil)
{
    /* My notes about Zbuffer compression:
     *
     * 1) The zbuffer must be micro-tiled and whole microtiles must be
     *    written if compression is enabled. If microtiling is disabled,
     *    it locks up.
     *
     * 2) There is ZMASK RAM which contains a compressed zbuffer.
     *    Each dword of the Z Mask contains compression information
     *    for 16 4x4 pixel tiles, that is 2 bits for each tile.
     *    On chips with 2 Z pipes, every other dword maps to a different
     *    pipe. On newer chipsets, there is a new compression mode
     *    with 8x8 pixel tiles per 2 bits.
     *
     * 3) The FASTFILL bit has nothing to do with filling. It only tells hw
     *    it should look in the ZMASK RAM first before fetching from a real
     *    zbuffer.
     *
     * 4) If a pixel is in a cleared state, ZB_DEPTHCLEARVALUE is returned
     *    during zbuffer reads instead of the value that is actually stored
     *    in the zbuffer memory. A pixel is in a cleared state when its ZMASK
     *    is equal to 0. Therefore, if you clear ZMASK with zeros, you may
     *    leave the zbuffer memory uninitialized, but then you must enable
     *    compression, so that the ZMASK RAM is actually used.
     *
     * 5) Each 4x4 (or 8x8) tile is automatically decompressed and recompressed
     *    during zbuffer updates. A special decompressing operation should be
     *    used to fully decompress a zbuffer, which basically just stores all
     *    compressed tiles in ZMASK to the zbuffer memory.
     *
     * 6) For a 16-bit zbuffer, compression causes a hang with one or
     *    two samples and should not be used.
     *
     * 7) FORCE_COMPRESSED_STENCIL_VALUE should be enabled for stencil clears
     *    to avoid needless decompression.
     *
     * 8) Fastfill must not be used if reading of compressed Z data is disabled
     *    and writing of compressed Z data is enabled (RD/WR_COMP_ENABLE),
     *    i.e. it cannot be used to compress the zbuffer.
     *
     * 9) ZB_CB_CLEAR does not interact with zbuffer compression in any way.
     *
     * - Marek
     */

    struct r300_context* r300 = r300_context(pipe);
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    struct r300_hyperz_state *hyperz =
        (struct r300_hyperz_state*)r300->hyperz_state.state;
    uint32_t width = fb->width;
    uint32_t height = fb->height;
    uint32_t hyperz_dcv = hyperz->zb_depthclearvalue;

    /* Enable fast Z clear.
     * The zbuffer must be in micro-tiled mode, otherwise it locks up. */
    if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
        boolean zmask_clear, hiz_clear;

        zmask_clear = r300_fast_zclear_allowed(r300);
        hiz_clear = r300_hiz_clear_allowed(r300);

        /* If we need Hyper-Z. */
        if (zmask_clear || hiz_clear) {
            r300->num_z_clears++;

            /* Try to obtain the access to Hyper-Z buffers if we don't have one. */
            if (!r300->hyperz_enabled) {
                r300->hyperz_enabled =
                    r300->rws->cs_request_feature(r300->cs,
                                                RADEON_FID_R300_HYPERZ_ACCESS,
                                                TRUE);
                if (r300->hyperz_enabled) {
                   /* Need to emit HyperZ buffer regs for the first time. */
                   r300_mark_fb_state_dirty(r300, R300_CHANGED_HYPERZ_FLAG);
                }
            }

            /* Setup Hyper-Z clears. */
            if (r300->hyperz_enabled) {
                DBG(r300, DBG_HYPERZ, "r300: Clear memory: %s%s\n",
                    zmask_clear ? "ZMASK " : "", hiz_clear ? "HIZ" : "");

                if (zmask_clear) {
                    hyperz_dcv = hyperz->zb_depthclearvalue =
                        r300_depth_clear_value(fb->zsbuf->format, depth, stencil);

                    r300_mark_atom_dirty(r300, &r300->zmask_clear);
                    buffers &= ~PIPE_CLEAR_DEPTHSTENCIL;
                }

                if (hiz_clear) {
                    r300->hiz_clear_value = r300_hiz_clear_value(depth);
                    r300_mark_atom_dirty(r300, &r300->hiz_clear);
                }
            }
        }
    }

    /* Enable CBZB clear. */
    if (r300_cbzb_clear_allowed(r300, buffers)) {
        struct r300_surface *surf = r300_surface(fb->cbufs[0]);

        hyperz->zb_depthclearvalue =
                r300_depth_clear_cb_value(surf->base.format, color->f);

        width = surf->cbzb_width;
        height = surf->cbzb_height;

        r300->cbzb_clear = TRUE;
        r300_mark_fb_state_dirty(r300, R300_CHANGED_HYPERZ_FLAG);
    }

    /* Clear. */
    if (buffers) {
        enum pipe_format cformat = fb->nr_cbufs ? fb->cbufs[0]->format : PIPE_FORMAT_NONE;
        /* Clear using the blitter. */
        r300_blitter_begin(r300, R300_CLEAR);
        util_blitter_clear(r300->blitter,
                           width,
                           height,
                           fb->nr_cbufs,
                           buffers, cformat, color, depth, stencil);
        r300_blitter_end(r300);
    } else if (r300->zmask_clear.dirty || r300->hiz_clear.dirty) {
        /* Just clear zmask and hiz now, this does not use the standard draw
         * procedure. */
        /* Calculate zmask_clear and hiz_clear atom sizes. */
        unsigned dwords =
            (r300->zmask_clear.dirty ? r300->zmask_clear.size : 0) +
            (r300->hiz_clear.dirty ? r300->hiz_clear.size : 0) +
            r300_get_num_cs_end_dwords(r300);

        /* Reserve CS space. */
        if (dwords > (RADEON_MAX_CMDBUF_DWORDS - r300->cs->cdw)) {
            r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
        }

        /* Emit clear packets. */
        if (r300->zmask_clear.dirty) {
            r300_emit_zmask_clear(r300, r300->zmask_clear.size,
                                  r300->zmask_clear.state);
            r300->zmask_clear.dirty = FALSE;
        }
        if (r300->hiz_clear.dirty) {
            r300_emit_hiz_clear(r300, r300->hiz_clear.size,
                                r300->hiz_clear.state);
            r300->hiz_clear.dirty = FALSE;
        }
    } else {
        assert(0);
    }

    /* Disable CBZB clear. */
    if (r300->cbzb_clear) {
        r300->cbzb_clear = FALSE;
        hyperz->zb_depthclearvalue = hyperz_dcv;
        r300_mark_fb_state_dirty(r300, R300_CHANGED_HYPERZ_FLAG);
    }

    /* Enable fastfill and/or hiz.
     *
     * If we cleared zmask/hiz, it's in use now. The Hyper-Z state update
     * looks if zmask/hiz is in use and programs hardware accordingly. */
    if (r300->zmask_in_use || r300->hiz_in_use) {
        r300_mark_atom_dirty(r300, &r300->hyperz_state);
    }
}
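
Point 2 in the long comment above fixes the ZMASK granularity: 2 bits per 4x4 pixel tile, 16 tiles per dword. A small hypothetical helper (not upstream code) makes that arithmetic concrete:

/* Hypothetical helper: estimate how many ZMASK dwords a surface needs under
 * the "2 bits per 4x4 tile, 16 tiles per dword" scheme described above. */
static unsigned example_zmask_dwords(unsigned width, unsigned height)
{
    unsigned tiles_x = (width  + 3) / 4;   /* 4x4 pixel tiles */
    unsigned tiles_y = (height + 3) / 4;
    unsigned tiles   = tiles_x * tiles_y;

    return (tiles + 15) / 16;              /* 16 tiles (2 bits each) per dword */
}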
Example #7
struct pipe_transfer*
r300_texture_get_transfer(struct pipe_context *ctx,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box)
{
    struct r300_context *r300 = r300_context(ctx);
    struct r300_resource *tex = r300_resource(texture);
    struct r300_transfer *trans;
    struct pipe_resource base;
    boolean referenced_cs, referenced_hw, blittable;
    const struct util_format_description *desc =
        util_format_description(texture->format);

    referenced_cs =
        r300->rws->cs_is_buffer_referenced(r300->cs, tex->cs_buf);
    if (referenced_cs) {
        referenced_hw = TRUE;
    } else {
        referenced_hw =
            r300->rws->buffer_is_busy(tex->buf, RADEON_USAGE_READWRITE);
    }

    blittable = desc->layout == UTIL_FORMAT_LAYOUT_PLAIN ||
                desc->layout == UTIL_FORMAT_LAYOUT_S3TC ||
                desc->layout == UTIL_FORMAT_LAYOUT_RGTC;

    trans = CALLOC_STRUCT(r300_transfer);
    if (trans) {
        /* Initialize the transfer object. */
        pipe_resource_reference(&trans->transfer.resource, texture);
        trans->transfer.level = level;
        trans->transfer.usage = usage;
        trans->transfer.box = *box;

        /* If the texture is tiled, we must create a temporary detiled texture
         * for this transfer.
         * Also make write transfers pipelined. */
        if (tex->tex.microtile || tex->tex.macrotile[level] ||
            (referenced_hw && blittable && !(usage & PIPE_TRANSFER_READ))) {
            if (r300->blitter->running) {
                fprintf(stderr, "r300: ERROR: Blitter recursion in texture_get_transfer.\n");
                os_break();
            }

            base.target = PIPE_TEXTURE_2D;
            base.format = texture->format;
            base.width0 = box->width;
            base.height0 = box->height;
            base.depth0 = 1;
            base.array_size = 1;
            base.last_level = 0;
            base.nr_samples = 0;
            base.usage = PIPE_USAGE_DYNAMIC;
            base.bind = 0;
            base.flags = R300_RESOURCE_FLAG_TRANSFER;

            /* For texture reading, the temporary (detiled) texture is used as
             * a render target when blitting from a tiled texture. */
            if (usage & PIPE_TRANSFER_READ) {
                base.bind |= PIPE_BIND_RENDER_TARGET;
            }
            /* For texture writing, the temporary texture is used as a sampler
             * when blitting into a tiled texture. */
            if (usage & PIPE_TRANSFER_WRITE) {
                base.bind |= PIPE_BIND_SAMPLER_VIEW;
            }

            /* Create the temporary texture. */
            trans->linear_texture = r300_resource(
               ctx->screen->resource_create(ctx->screen,
                                            &base));

            if (!trans->linear_texture) {
                /* Oh crap, the thing can't create the texture.
                 * Let's flush and try again. */
                r300_flush(ctx, 0, NULL);

                trans->linear_texture = r300_resource(
                   ctx->screen->resource_create(ctx->screen,
                                                &base));

                if (!trans->linear_texture) {
                    /* For linear textures, it's safe to fallback to
                     * an unpipelined transfer. */
                    if (!tex->tex.microtile && !tex->tex.macrotile[level]) {
                        goto unpipelined;
                    }

                    /* Otherwise, go to hell. */
                    fprintf(stderr,
                        "r300: Failed to create a transfer object, praise.\n");
                    FREE(trans);
                    return NULL;
                }
            }

            assert(!trans->linear_texture->tex.microtile &&
                   !trans->linear_texture->tex.macrotile[0]);

            /* Set the stride. */
            trans->transfer.stride =
                    trans->linear_texture->tex.stride_in_bytes[0];

            if (usage & PIPE_TRANSFER_READ) {
                /* We cannot map a tiled texture directly because the data is
                 * in a different order, therefore we do detiling using a blit. */
                r300_copy_from_tiled_texture(ctx, trans);

                /* Always referenced in the blit. */
                r300_flush(ctx, 0, NULL);
            }
            return &trans->transfer;
        }

    unpipelined:
        /* Unpipelined transfer. */
        trans->transfer.stride = tex->tex.stride_in_bytes[level];
        trans->offset = r300_texture_get_offset(tex, level, box->z);

        if (referenced_cs &&
            !(usage & PIPE_TRANSFER_UNSYNCHRONIZED))
            r300_flush(ctx, 0, NULL);
        return &trans->transfer;
    }
    return NULL;
}
Example #8
File: r300_context.c    Project: soreau/mesa
static void r300_flush_callback(void *data, unsigned flags)
{
    struct r300_context* const cs_context_copy = data;

    r300_flush(&cs_context_copy->context, flags, NULL);
}
Example #9
void *
r300_texture_transfer_map(struct pipe_context *ctx,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **transfer)
{
    struct r300_context *r300 = r300_context(ctx);
    struct r300_resource *tex = r300_resource(texture);
    struct r300_transfer *trans;
    struct pipe_resource base;
    boolean referenced_cs, referenced_hw;
    enum pipe_format format = tex->b.b.format;
    char *map;

    referenced_cs =
        r300->rws->cs_is_buffer_referenced(r300->cs, tex->cs_buf, RADEON_USAGE_READWRITE);
    if (referenced_cs) {
        referenced_hw = TRUE;
    } else {
        referenced_hw =
            r300->rws->buffer_is_busy(tex->buf, RADEON_USAGE_READWRITE);
    }

    trans = CALLOC_STRUCT(r300_transfer);
    if (trans) {
        /* Initialize the transfer object. */
        trans->transfer.resource = texture;
        trans->transfer.level = level;
        trans->transfer.usage = usage;
        trans->transfer.box = *box;

        /* If the texture is tiled, we must create a temporary detiled texture
         * for this transfer.
         * Also make write transfers pipelined. */
        if (tex->tex.microtile || tex->tex.macrotile[level] ||
            (referenced_hw && !(usage & PIPE_TRANSFER_READ) &&
             r300_is_blit_supported(texture->format))) {
            if (r300->blitter->running) {
                fprintf(stderr, "r300: ERROR: Blitter recursion in texture_get_transfer.\n");
                os_break();
            }

            base.target = PIPE_TEXTURE_2D;
            base.format = texture->format;
            base.width0 = box->width;
            base.height0 = box->height;
            base.depth0 = 1;
            base.array_size = 1;
            base.last_level = 0;
            base.nr_samples = 0;
            base.usage = PIPE_USAGE_STAGING;
            base.bind = 0;
            base.flags = R300_RESOURCE_FLAG_TRANSFER;

            /* For texture reading, the temporary (detiled) texture is used as
             * a render target when blitting from a tiled texture. */
            if (usage & PIPE_TRANSFER_READ) {
                base.bind |= PIPE_BIND_RENDER_TARGET;
            }
            /* For texture writing, the temporary texture is used as a sampler
             * when blitting into a tiled texture. */
            if (usage & PIPE_TRANSFER_WRITE) {
                base.bind |= PIPE_BIND_SAMPLER_VIEW;
            }

            /* Create the temporary texture. */
            trans->linear_texture = r300_resource(
               ctx->screen->resource_create(ctx->screen,
                                            &base));

            if (!trans->linear_texture) {
                /* Oh crap, the thing can't create the texture.
                 * Let's flush and try again. */
                r300_flush(ctx, 0, NULL);

                trans->linear_texture = r300_resource(
                   ctx->screen->resource_create(ctx->screen,
                                                &base));

                if (!trans->linear_texture) {
                    fprintf(stderr,
                            "r300: Failed to create a transfer object.\n");
                    FREE(trans);
                    return NULL;
                }
            }

            assert(!trans->linear_texture->tex.microtile &&
                   !trans->linear_texture->tex.macrotile[0]);

            /* Set the stride. */
            trans->transfer.stride =
                    trans->linear_texture->tex.stride_in_bytes[0];

            if (usage & PIPE_TRANSFER_READ) {
                /* We cannot map a tiled texture directly because the data is
                 * in a different order, therefore we do detiling using a blit. */
                r300_copy_from_tiled_texture(ctx, trans);

                /* Always referenced in the blit. */
                r300_flush(ctx, 0, NULL);
            }
        } else {
            /* Unpipelined transfer. */
            trans->transfer.stride = tex->tex.stride_in_bytes[level];
            trans->offset = r300_texture_get_offset(tex, level, box->z);

            if (referenced_cs &&
                !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                r300_flush(ctx, 0, NULL);
            }
        }
    } else {
        /* CALLOC failed; bail out before dereferencing trans below. */
        return NULL;
    }

    if (trans->linear_texture) {
        /* The detiled texture is of the same size as the region being mapped
         * (no offset needed). */
        map = r300->rws->buffer_map(trans->linear_texture->cs_buf,
                                    r300->cs, usage);
        if (!map) {
            pipe_resource_reference(
                (struct pipe_resource**)&trans->linear_texture, NULL);
            FREE(trans);
            return NULL;
        }
        *transfer = &trans->transfer;
        return map;
    } else {
        /* Tiling is disabled. */
        map = r300->rws->buffer_map(tex->cs_buf, r300->cs, usage);
        if (!map) {
            FREE(trans);
            return NULL;
        }

        *transfer = &trans->transfer;
        return map + trans->offset +
            box->y / util_format_get_blockheight(format) * trans->transfer.stride +
            box->x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
    }
}
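
The final return expression above converts pixel coordinates to block coordinates before applying the stride. For a block-compressed format such as DXT1 (4x4 blocks, 8 bytes per block) the arithmetic works out as in this hypothetical illustration (not upstream code):

/* Hypothetical illustration of the offset math above, using DXT1 block
 * parameters: blockwidth = blockheight = 4, blocksize = 8 bytes. */
static char *example_dxt1_offset(char *map, unsigned stride,
                                 unsigned x, unsigned y)
{
    /* (y / 4) block rows down, (x / 4) blocks across, 8 bytes per block. */
    return map + (y / 4) * stride + (x / 4) * 8;
}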
Example #10
static void r300_flush_wrapped(struct pipe_context *pipe,
                               struct pipe_fence_handle **fence)
{
    r300_flush(pipe, 0, fence);
}
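
This older two-argument form of pipe_context::flush is what a frontend calls when it wants the command stream submitted and, optionally, a fence back. A hedged sketch of that caller side (the function name is an assumption; the flush hook signature matches the one shown above):

/* Hypothetical sketch (not upstream code): a frontend kicking the CS through
 * the flush hook that r300_flush_wrapped implements in this branch. */
static void example_frontend_flush(struct pipe_context *pipe)
{
    struct pipe_fence_handle *fence = NULL;

    pipe->flush(pipe, &fence);   /* ends up in r300_flush() */

    /* The fence, if returned, can later be waited on through the screen's
     * fence interface before the frontend reads back results. */
    (void)fence;
}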