static void
vc4_clear(struct pipe_context *pctx, unsigned buffers,
          const union pipe_color_union *color, double depth, unsigned stencil)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        /* We can't flag new buffers for clearing once we've queued draws.  We
         * could avoid this by using the 3d engine to clear.
         */
        if (vc4->draw_call_queued)
                vc4_flush(pctx);

        if (buffers & PIPE_CLEAR_COLOR0) {
                vc4->clear_color[0] = vc4->clear_color[1] =
                        pack_rgba(vc4->framebuffer.cbufs[0]->format,
                                  color->f);
        }

        if (buffers & PIPE_CLEAR_DEPTH) {
                /* Though the depth buffer is stored with Z in the high 24
                 * bits, for this field we just need to store it in the
                 * low 24 bits.
                 */
                vc4->clear_depth = util_pack_z(PIPE_FORMAT_Z24X8_UNORM, depth);
        }

        if (buffers & PIPE_CLEAR_STENCIL)
                vc4->clear_stencil = stencil;

        vc4->cleared |= buffers;
        vc4->resolve |= buffers;

        vc4_start_draw(vc4);
}
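/* pack_rgba() and util_pack_z() are used above but not shown in this
 * excerpt.  A minimal sketch of what pack_rgba() presumably looks like,
 * built on gallium's util_pack_color() from u_pack_color.h (the 2-byte
 * case covers 16bpp formats such as RGB565; the util_color field names
 * can differ between Mesa versions):
 */
static uint32_t
pack_rgba(enum pipe_format format, const float *rgba)
{
        union util_color uc;
        util_pack_color(rgba, format, &uc);
        if (util_format_get_blocksize(format) == 2)
                return uc.us;
        else
                return uc.ui[0];
}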
static void
vc4_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->zsa = hwcso;
        vc4->dirty |= VC4_DIRTY_ZSA;
}
static void
vc4_context_destroy(struct pipe_context *pctx)
{
    struct vc4_context *vc4 = vc4_context(pctx);

    if (vc4->blitter)
        util_blitter_destroy(vc4->blitter);

    if (vc4->primconvert)
        util_primconvert_destroy(vc4->primconvert);

    if (vc4->uploader)
        u_upload_destroy(vc4->uploader);

    util_slab_destroy(&vc4->transfer_pool);

    pipe_surface_reference(&vc4->framebuffer.cbufs[0], NULL);
    pipe_surface_reference(&vc4->framebuffer.zsbuf, NULL);

    pipe_surface_reference(&vc4->color_write, NULL);
    pipe_surface_reference(&vc4->color_read, NULL);

    vc4_program_fini(pctx);

    ralloc_free(vc4);
}
static void
vc4_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
                        struct pipe_constant_buffer *cb)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_constbuf_stateobj *so = &vc4->constbuf[shader];

        assert(index == 0);

        /* Note that the state tracker can unbind constant buffers by
         * passing NULL here.
         */
        if (unlikely(!cb)) {
                so->enabled_mask &= ~(1 << index);
                so->dirty_mask &= ~(1 << index);
                return;
        }

        /* vc4 only supports user (CPU-side) constant buffers. */
        assert(!cb->buffer);
        so->cb[index].buffer_offset = cb->buffer_offset;
        so->cb[index].buffer_size   = cb->buffer_size;
        so->cb[index].user_buffer   = cb->user_buffer;

        so->enabled_mask |= 1 << index;
        so->dirty_mask |= 1 << index;
        vc4->dirty |= VC4_DIRTY_CONSTBUF;
}
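/* A sketch of the caller's side of the hook above (a hypothetical
 * helper, not driver code), assuming the pipe_context hook matches the
 * implementation's signature: vc4 only accepts user-pointer constant
 * buffers, so cb.buffer stays NULL, and passing NULL unbinds the slot.
 */
static void
example_set_uniforms(struct pipe_context *pctx, const float *data,
                     unsigned bytes)
{
        struct pipe_constant_buffer cb = {
                .buffer = NULL,
                .buffer_offset = 0,
                .buffer_size = bytes,
                .user_buffer = data,
        };

        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb);

        /* Unbind slot 0 again. */
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, NULL);
}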
static void
vc4_set_index_buffer(struct pipe_context *pctx,
                     const struct pipe_index_buffer *ib)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (ib) {
                assert(!ib->user_buffer);

                if (ib->index_size == 4) {
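                        /* The hardware only understands 8- and 16-bit
                         * indices, so a 32-bit index buffer gets a
                         * half-size 16-bit shadow copy instead.
                         */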
                        struct pipe_resource tmpl = *ib->buffer;
                        assert(tmpl.format == PIPE_FORMAT_R8_UNORM);
                        assert(tmpl.height0 == 1);
                        tmpl.width0 = (tmpl.width0 - ib->offset) / 2;
                        struct pipe_resource *pshadow =
                                vc4_resource_create(&vc4->screen->base, &tmpl);
                        struct vc4_resource *shadow = vc4_resource(pshadow);
                        pipe_resource_reference(&shadow->shadow_parent, ib->buffer);

                        pipe_resource_reference(&vc4->indexbuf.buffer, NULL);
                        vc4->indexbuf.buffer = pshadow;
                        vc4->indexbuf.index_size = 2;
                        vc4->indexbuf.offset = 0;
                } else {
                        pipe_resource_reference(&vc4->indexbuf.buffer, ib->buffer);
                        vc4->indexbuf.index_size = ib->index_size;
                        vc4->indexbuf.offset = ib->offset;
                }
        } else {
                pipe_resource_reference(&vc4->indexbuf.buffer, NULL);
        }

        vc4->dirty |= VC4_DIRTY_INDEXBUF;
}
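/* The half-width shadow above implies a 32->16 bit index conversion
 * when the shadow is filled at draw time.  An illustrative version of
 * that copy (names here are made up, not the driver's):
 */
static void
example_shrink_indices(uint16_t *dst, const uint32_t *src, unsigned count)
{
        for (unsigned i = 0; i < count; i++) {
                /* 16-bit indices can't address past 65535. */
                assert(src[i] <= 0xffff);
                dst[i] = src[i];
        }
}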
static void
vc4_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->blend = hwcso;
        vc4->dirty |= VC4_DIRTY_BLEND;
}
static void
vc4_set_sampler_views(struct pipe_context *pctx, unsigned shader,
                      unsigned start, unsigned nr,
                      struct pipe_sampler_view **views)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_texture_stateobj *stage_tex = vc4_get_stage_tex(vc4, shader);
        unsigned i;
        unsigned new_nr = 0;

        assert(start == 0);

        vc4->dirty |= VC4_DIRTY_TEXSTATE;

        for (i = 0; i < nr; i++) {
                if (views[i]) {
                        new_nr = i + 1;
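                        /* vc4 can't set a nonzero texture base level
                         * directly, so such views sample from a shadow
                         * copy of the baselevel contents.
                         */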
                        if (views[i]->u.tex.first_level != 0)
                                vc4_update_shadow_baselevel_texture(pctx, views[i]);
                }
                pipe_sampler_view_reference(&stage_tex->textures[i], views[i]);
                stage_tex->dirty_samplers |= (1 << i);
        }

        for (; i < stage_tex->num_textures; i++) {
                pipe_sampler_view_reference(&stage_tex->textures[i], NULL);
                stage_tex->dirty_samplers |= (1 << i);
        }

        stage_tex->num_textures = new_nr;
}
static void
vc4_sampler_states_bind(struct pipe_context *pctx,
                        unsigned shader, unsigned start,
                        unsigned nr, void **hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_texture_stateobj *stage_tex = vc4_get_stage_tex(vc4, shader);

        assert(start == 0);
        unsigned i;
        unsigned new_nr = 0;

        for (i = 0; i < nr; i++) {
                if (hwcso[i])
                        new_nr = i + 1;
                stage_tex->samplers[i] = hwcso[i];
                stage_tex->dirty_samplers |= (1 << i);
        }

        for (; i < stage_tex->num_samplers; i++) {
                stage_tex->samplers[i] = NULL;
                stage_tex->dirty_samplers |= (1 << i);
        }

        stage_tex->num_samplers = new_nr;
}
static void
vc4_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->sample_mask = sample_mask & ((1 << VC4_MAX_SAMPLES) - 1);
        vc4->dirty |= VC4_DIRTY_SAMPLE_MASK;
}
static void
vc4_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->sample_mask = (uint16_t)sample_mask;
        vc4->dirty |= VC4_DIRTY_SAMPLE_MASK;
}
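/* VC4_MAX_SAMPLES, as used by the clamping variant above, would come
 * from the driver's context header; vc4 supports 4x MSAA, so a
 * plausible definition (assumed here) is:
 */
#define VC4_MAX_SAMPLES 4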
void
vc4_flush(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (!vc4->needs_flush)
                return;

        /* The RCL setup would choke if the draw bounds cause no drawing, so
         * just drop the drawing if that's the case.
         */
        if (vc4->draw_max_x <= vc4->draw_min_x ||
            vc4->draw_max_y <= vc4->draw_min_y) {
                vc4_job_reset(vc4);
                return;
        }

        /* Increment the semaphore indicating that binning is done and
         * unblocking the render thread.  Note that this doesn't act until the
         * FLUSH completes.
         */
        cl_ensure_space(&vc4->bcl, 8);
        cl_u8(&vc4->bcl, VC4_PACKET_INCREMENT_SEMAPHORE);
        /* The FLUSH caps all of our bin lists with a VC4_PACKET_RETURN. */
        cl_u8(&vc4->bcl, VC4_PACKET_FLUSH);

        vc4_setup_rcl(vc4);

        vc4_job_submit(vc4);
}
static bool
vc4_render_blit(struct pipe_context *ctx, struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(ctx);

        if (!util_blitter_is_blit_supported(vc4->blitter, info)) {
                fprintf(stderr, "blit unsupported %s -> %s\n",
                    util_format_short_name(info->src.resource->format),
                    util_format_short_name(info->dst.resource->format));
                return false;
        }

        /* Enable the scissor, so we get a minimal set of tiles rendered. */
        if (!info->scissor_enable) {
                info->scissor_enable = true;
                info->scissor.minx = info->dst.box.x;
                info->scissor.miny = info->dst.box.y;
                info->scissor.maxx = info->dst.box.x + info->dst.box.width;
                info->scissor.maxy = info->dst.box.y + info->dst.box.height;
        }

        vc4_blitter_save(vc4);
        util_blitter_blit(vc4->blitter, info);

        return true;
}
static void
vc4_resource_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *ptrans)
{
    struct vc4_context *vc4 = vc4_context(pctx);
    struct vc4_transfer *trans = vc4_transfer(ptrans);
    struct pipe_resource *prsc = ptrans->resource;
    struct vc4_resource *rsc = vc4_resource(prsc);
    struct vc4_resource_slice *slice = &rsc->slices[ptrans->level];

    if (trans->map) {
        if (ptrans->usage & PIPE_TRANSFER_WRITE) {
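            /* Writes landed in a linear staging map; copy them back
             * into the BO's tiled layout on unmap.
             */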
            vc4_store_tiled_image(rsc->bo->map + slice->offset +
                                  ptrans->box.z * rsc->cube_map_stride,
                                  slice->stride,
                                  trans->map, ptrans->stride,
                                  slice->tiling, rsc->cpp,
                                  &ptrans->box);
        }
        free(trans->map);
    }

    pipe_resource_reference(&ptrans->resource, NULL);
    util_slab_free(&vc4->transfer_pool, ptrans);
}
static bool
render_blit(struct pipe_context *ctx, struct pipe_blit_info *info)
{
    struct vc4_context *vc4 = vc4_context(ctx);

    if (!util_blitter_is_blit_supported(vc4->blitter, info)) {
        fprintf(stderr, "blit unsupported %s -> %s",
                util_format_short_name(info->src.resource->format),
                util_format_short_name(info->dst.resource->format));
        return false;
    }

    util_blitter_save_vertex_buffer_slot(vc4->blitter, vc4->vertexbuf.vb);
    util_blitter_save_vertex_elements(vc4->blitter, vc4->vtx);
    util_blitter_save_vertex_shader(vc4->blitter, vc4->prog.bind_vs);
    util_blitter_save_rasterizer(vc4->blitter, vc4->rasterizer);
    util_blitter_save_viewport(vc4->blitter, &vc4->viewport);
    util_blitter_save_scissor(vc4->blitter, &vc4->scissor);
    util_blitter_save_fragment_shader(vc4->blitter, vc4->prog.bind_fs);
    util_blitter_save_blend(vc4->blitter, vc4->blend);
    util_blitter_save_depth_stencil_alpha(vc4->blitter, vc4->zsa);
    util_blitter_save_stencil_ref(vc4->blitter, &vc4->stencil_ref);
    util_blitter_save_sample_mask(vc4->blitter, vc4->sample_mask);
    util_blitter_save_framebuffer(vc4->blitter, &vc4->framebuffer);
    util_blitter_save_fragment_sampler_states(vc4->blitter,
            vc4->fragtex.num_samplers,
            (void **)vc4->fragtex.samplers);
    util_blitter_save_fragment_sampler_views(vc4->blitter,
            vc4->fragtex.num_textures, vc4->fragtex.textures);

    util_blitter_blit(vc4->blitter, info);

    return true;
}
static void
vc4_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->vtx = hwcso;
        vc4->dirty |= VC4_DIRTY_VTXSTATE;
}
void
vc4_flush(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (!vc4->needs_flush)
                return;

        cl_u8(&vc4->bcl, VC4_PACKET_FLUSH_ALL);
        cl_u8(&vc4->bcl, VC4_PACKET_NOP);
        cl_u8(&vc4->bcl, VC4_PACKET_HALT);

        vc4_setup_rcl(vc4);

        struct drm_vc4_submit_cl submit;
        memset(&submit, 0, sizeof(submit));

        submit.bo_handles = vc4->bo_handles.base;
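        /* The handle CL is a packed array of 32-bit GEM handles, hence
         * dividing the byte length by 4 to get the count.
         */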
        submit.bo_handle_count = (vc4->bo_handles.next -
                                  vc4->bo_handles.base) / 4;
        submit.bin_cl = vc4->bcl.base;
        submit.bin_cl_size = vc4->bcl.next - vc4->bcl.base;
        submit.render_cl = vc4->rcl.base;
        submit.render_cl_size = vc4->rcl.next - vc4->rcl.base;
        submit.shader_rec = vc4->shader_rec.base;
        submit.shader_rec_size = vc4->shader_rec.next - vc4->shader_rec.base;
        submit.shader_rec_count = vc4->shader_rec_count;
        submit.uniforms = vc4->uniforms.base;
        submit.uniforms_size = vc4->uniforms.next - vc4->uniforms.base;

        if (!(vc4_debug & VC4_DEBUG_NORAST)) {
                int ret;

#ifndef USE_VC4_SIMULATOR
                ret = drmIoctl(vc4->fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
#else
                ret = vc4_simulator_flush(vc4, &submit);
#endif
                if (ret)
                        errx(1, "VC4 submit failed");
        }

        vc4_reset_cl(&vc4->bcl);
        vc4_reset_cl(&vc4->rcl);
        vc4_reset_cl(&vc4->shader_rec);
        vc4_reset_cl(&vc4->uniforms);
        vc4_reset_cl(&vc4->bo_handles);
        struct vc4_bo **referenced_bos = vc4->bo_pointers.base;
        for (int i = 0; i < submit.bo_handle_count; i++)
                vc4_bo_unreference(&referenced_bos[i]);
        vc4_reset_cl(&vc4->bo_pointers);
        vc4->shader_rec_count = 0;

        vc4->needs_flush = false;
        vc4->draw_call_queued = false;
        vc4->dirty = ~0;
        vc4->resolve = 0;
        vc4->cleared = 0;
}
static void
vc4_flush_resource(struct pipe_context *pctx, struct pipe_resource *resource)
{
    struct vc4_context *vc4 = vc4_context(pctx);

    /* XXX: Skip this if we don't have any queued drawing to it. */
    vc4->base.flush(pctx, NULL, 0);
}
static void
vc4_set_polygon_stipple(struct pipe_context *pctx,
                        const struct pipe_poly_stipple *stipple)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->stipple = *stipple;
        vc4->dirty |= VC4_DIRTY_STIPPLE;
}
static void
vc4_set_stencil_ref(struct pipe_context *pctx,
                    const struct pipe_stencil_ref *stencil_ref)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->stencil_ref = *stencil_ref;
        vc4->dirty |= VC4_DIRTY_STENCIL_REF;
}
static void
vc4_set_clip_state(struct pipe_context *pctx,
                   const struct pipe_clip_state *clip)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->clip = *clip;
        vc4->dirty |= VC4_DIRTY_CLIP;
}
static void
vc4_set_blend_color(struct pipe_context *pctx,
                    const struct pipe_blend_color *blend_color)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->blend_color = *blend_color;
        vc4->dirty |= VC4_DIRTY_BLEND_COLOR;
}
void
vc4_flush(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct pipe_surface *cbuf = vc4->framebuffer.cbufs[0];
        struct pipe_surface *zsbuf = vc4->framebuffer.zsbuf;

        if (!vc4->needs_flush)
                return;

        /* The RCL setup would choke if the draw bounds cause no drawing, so
         * just drop the drawing if that's the case.
         */
        if (vc4->draw_max_x <= vc4->draw_min_x ||
            vc4->draw_max_y <= vc4->draw_min_y) {
                vc4_job_reset(vc4);
                return;
        }

        /* Increment the semaphore indicating that binning is done and
         * unblocking the render thread.  Note that this doesn't act until the
         * FLUSH completes.
         */
        cl_ensure_space(&vc4->bcl, 8);
        struct vc4_cl_out *bcl = cl_start(&vc4->bcl);
        cl_u8(&bcl, VC4_PACKET_INCREMENT_SEMAPHORE);
        /* The FLUSH caps all of our bin lists with a VC4_PACKET_RETURN. */
        cl_u8(&bcl, VC4_PACKET_FLUSH);
        cl_end(&vc4->bcl, bcl);

        if (cbuf && (vc4->resolve & PIPE_CLEAR_COLOR0)) {
                pipe_surface_reference(&vc4->color_write, cbuf);
                if (!(vc4->cleared & PIPE_CLEAR_COLOR0)) {
                        pipe_surface_reference(&vc4->color_read, cbuf);
                } else {
                        pipe_surface_reference(&vc4->color_read, NULL);
                }
        } else {
                pipe_surface_reference(&vc4->color_write, NULL);
                pipe_surface_reference(&vc4->color_read, NULL);
        }

        if (zsbuf &&
            (vc4->resolve & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
                pipe_surface_reference(&vc4->zs_write, zsbuf);
                if (!(vc4->cleared & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))) {
                        pipe_surface_reference(&vc4->zs_read, zsbuf);
                } else {
                        pipe_surface_reference(&vc4->zs_read, NULL);
                }
        } else {
                pipe_surface_reference(&vc4->zs_write, NULL);
                pipe_surface_reference(&vc4->zs_read, NULL);
        }

        vc4_job_submit(vc4);
}
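/* Note on the read/write surface juggling above: the read references
 * become the RCL's tile loads and the write references its stores.
 * When a buffer was fully cleared this frame there is nothing worth
 * loading back, so the read reference is dropped and tiles start from
 * the clear value instead.  (Roles inferred from this excerpt.)
 */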
static void
vc4_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
    struct vc4_context *vc4 = vc4_context(pctx);
    struct pipe_surface *zsurf = vc4->framebuffer.zsbuf;

    if (zsurf && zsurf->texture == prsc)
        vc4->resolve &= ~(PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL);
}
static void
vc4_set_blend_color(struct pipe_context *pctx,
                    const struct pipe_blend_color *blend_color)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->blend_color.f = *blend_color;
        for (int i = 0; i < 4; i++)
                vc4->blend_color.ub[i] = float_to_ubyte(blend_color->color[i]);
        vc4->dirty |= VC4_DIRTY_BLEND_COLOR;
}
static void
vc4_set_viewport_states(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_viewports,
                        const struct pipe_viewport_state *viewport)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->viewport = *viewport;
        vc4->dirty |= VC4_DIRTY_VIEWPORT;
}
/**
 * HW-2116 workaround: Flush the batch before triggering the hardware state
 * counter wraparound behavior.
 *
 * State updates are tracked by a global counter which increments at the first
 * state update after a draw or a START_BINNING.  Tiles can then have their
 * state updated at draw time with a set of cheap checks for whether the
 * state's copy of the global counter matches the global counter the last time
 * that state was written to the tile.
 *
 * The state counters are relatively small and wrap around quickly, so you
 * could get false negatives for needing to update a particular state in the
 * tile.  To avoid this, the hardware attempts to write all of the state in
 * the tile at wraparound time.  This apparently is broken, so we just flush
 * everything before that behavior is triggered.  A batch flush is sufficient
 * to get our current contents drawn and reset the counters to 0.
 *
 * Note that we can't just use VC4_PACKET_FLUSH_ALL, because that caps the
 * tiles with VC4_PACKET_RETURN_FROM_LIST.
 */
static void
vc4_hw_2116_workaround(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        if (vc4->draw_calls_queued == 0x1ef0) {
                perf_debug("Flushing batch due to HW-2116 workaround "
                           "(too many draw calls per scene\n");
                vc4_flush(pctx);
        }
}
static void
vc4_set_scissor_states(struct pipe_context *pctx,
                       unsigned start_slot,
                       unsigned num_scissors,
                       const struct pipe_scissor_state *scissor)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        vc4->scissor = *scissor;
        vc4->dirty |= VC4_DIRTY_SCISSOR;
}
/**
 * HW-2116 workaround: Flush the batch before triggering the hardware state
 * counter wraparound behavior.
 *
 * State updates are tracked by a global counter which increments at the first
 * state update after a draw or a START_BINNING.  Tiles can then have their
 * state updated at draw time with a set of cheap checks for whether the
 * state's copy of the global counter matches the global counter the last time
 * that state was written to the tile.
 *
 * The state counters are relatively small and wrap around quickly, so you
 * could get false negatives for needing to update a particular state in the
 * tile.  To avoid this, the hardware attempts to write all of the state in
 * the tile at wraparound time.  This apparently is broken, so we just flush
 * everything before that behavior is triggered.  A batch flush is sufficient
 * to get our current contents drawn and reset the counters to 0.
 *
 * Note that we can't just use VC4_PACKET_FLUSH_ALL, because that caps the
 * tiles with VC4_PACKET_RETURN_FROM_LIST.
 */
static void
vc4_hw_2116_workaround(struct pipe_context *pctx, int vert_count)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_job *job = vc4_get_job_for_fbo(vc4);

        if (job->draw_calls_queued + vert_count / 65535 >= VC4_HW_2116_COUNT) {
                perf_debug("Flushing batch due to HW-2116 workaround "
                           "(too many draw calls per scene\n");
                vc4_job_submit(vc4, job);
        }
}
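/* A plausible definition for the threshold used above (assumed here;
 * the driver defines it in its context header), sitting safely below
 * the hardware state counter's wraparound point:
 */
#define VC4_HW_2116_COUNT 0x1ef0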
static void
vc4_pipe_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
               unsigned flags)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        vc4_flush(pctx);

        if (fence) {
                struct vc4_fence *f = vc4_fence_create(vc4->screen,
                                                       vc4->last_emit_seqno);
                *fence = (struct pipe_fence_handle *)f;
        }
}
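/* A sketch of the fence object created above (field names assumed): it
 * only needs to remember the kernel seqno of the last submitted job, so
 * waiting on the fence reduces to waiting on that seqno.
 */
struct vc4_fence {
        struct pipe_reference reference;
        uint64_t seqno;
};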
static void
vc4_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_rasterizer_state *rast = hwcso;

        if (vc4->rasterizer && rast &&
            vc4->rasterizer->base.flatshade != rast->base.flatshade) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }

        vc4->rasterizer = hwcso;
        vc4->dirty |= VC4_DIRTY_RASTERIZER;
}