/* Flush the current batch if the GPU still has pending reads or writes
 * against this resource.
 */
static void
fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);

	if (pending(rsc, FD_PENDING_WRITE | FD_PENDING_READ))
		fd_context_render(pctx);
}
/* Flush rendering if this resource has been written by the current batch. */
static void
fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);

	if (rsc->dirty)
		fd_context_render(pctx);
}
/* Flush any pending rendering to the transfer's resource, then wait for
 * the GPU to be done with it before the CPU touches the mapping.
 */
static void
fd_resource_transfer_flush_region(struct pipe_context *pctx,
		struct pipe_transfer *ptrans,
		const struct pipe_box *box)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(ptrans->resource);

	if (rsc->dirty)
		fd_context_render(pctx);

	if (rsc->timestamp) {
		fd_pipe_wait(ctx->screen->pipe, rsc->timestamp);
		rsc->timestamp = 0;
	}
}
static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
		unsigned flags)
{
	DBG("fence=%p", fence);

	/* TODO: returning a fence to the caller is not hooked up yet: */
#if 0
	if (fence) {
		fd_fence_ref(ctx->screen->fence.current,
				(struct fd_fence **)fence);
	}
#endif

	fd_context_render(pctx);
}
static void
fd_set_framebuffer_state(struct pipe_context *pctx,
		const struct pipe_framebuffer_state *framebuffer)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *cso = &ctx->framebuffer;
	unsigned i;

	DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->needs_flush,
			cso->cbufs[0], cso->zsbuf);

	fd_context_render(pctx);

	for (i = 0; i < framebuffer->nr_cbufs; i++)
		pipe_surface_reference(&cso->cbufs[i], framebuffer->cbufs[i]);
	for (; i < ctx->framebuffer.nr_cbufs; i++)
		pipe_surface_reference(&cso->cbufs[i], NULL);

	cso->nr_cbufs = framebuffer->nr_cbufs;
	cso->width = framebuffer->width;
	cso->height = framebuffer->height;

	pipe_surface_reference(&cso->zsbuf, framebuffer->zsbuf);

	ctx->dirty |= FD_DIRTY_FRAMEBUFFER;

	/* also need to reset the scissor.. mesa/gl state tracker
	 * does this for us, but u_blitter doesn't and other
	 * state trackers might not..
	 */
	ctx->scissor.minx = 0;
	ctx->scissor.miny = 0;
	ctx->scissor.maxx = cso->width;
	ctx->scissor.maxy = cso->height;

	ctx->dirty |= FD_DIRTY_SCISSOR;
}
static void *
fd_resource_transfer_map(struct pipe_context *pctx,
		struct pipe_resource *prsc,
		unsigned level, unsigned usage,
		const struct pipe_box *box,
		struct pipe_transfer **pptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(prsc);
	struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
	struct fd_transfer *trans;
	struct pipe_transfer *ptrans;
	enum pipe_format format = prsc->format;
	uint32_t op = 0;
	uint32_t offset;
	char *buf;
	int ret = 0;

	DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
			box->width, box->height, box->x, box->y);

	ptrans = util_slab_alloc(&ctx->transfer_pool);
	if (!ptrans)
		return NULL;

	/* util_slab_alloc() doesn't zero: */
	trans = fd_transfer(ptrans);
	memset(trans, 0, sizeof(*trans));

	pipe_resource_reference(&ptrans->resource, prsc);
	ptrans->level = level;
	ptrans->usage = usage;
	ptrans->box = *box;
	ptrans->stride = util_format_get_nblocksx(format, slice->pitch) * rsc->cpp;
	ptrans->layer_stride = rsc->layer_first ? rsc->layer_size : slice->size0;

	if (usage & PIPE_TRANSFER_READ)
		op |= DRM_FREEDRENO_PREP_READ;

	if (usage & PIPE_TRANSFER_WRITE)
		op |= DRM_FREEDRENO_PREP_WRITE;

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
		realloc_bo(rsc, fd_bo_size(rsc->bo));
		if (rsc->stencil)
			realloc_bo(rsc->stencil, fd_bo_size(rsc->stencil->bo));
		fd_invalidate_resource(ctx, prsc);
	} else if ((usage & PIPE_TRANSFER_WRITE) &&
			prsc->target == PIPE_BUFFER &&
			!util_ranges_intersect(&rsc->valid_buffer_range,
					box->x, box->x + box->width)) {
		/* We are trying to write to a previously uninitialized range. No need
		 * to wait.
		 */
	} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		/* If the GPU is writing to the resource, or if it is reading from the
		 * resource and we're trying to write to it, flush the renders.
		 */
		if (((ptrans->usage & PIPE_TRANSFER_WRITE) &&
					pending(rsc, FD_PENDING_READ | FD_PENDING_WRITE)) ||
				pending(rsc, FD_PENDING_WRITE))
			fd_context_render(pctx);

		/* The GPU keeps track of how the various bo's are being used, and
		 * will wait if necessary for the proper operation to have
		 * completed.
		 */
		ret = fd_bo_cpu_prep(rsc->bo, ctx->screen->pipe, op);
		if (ret)
			goto fail;
	}

	buf = fd_bo_map(rsc->bo);
	if (!buf)
		goto fail;

	offset = slice->offset +
		box->y / util_format_get_blockheight(format) * ptrans->stride +
		box->x / util_format_get_blockwidth(format) * rsc->cpp +
		fd_resource_layer_offset(rsc, slice, box->z);

	if (prsc->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT ||
			prsc->format == PIPE_FORMAT_X32_S8X24_UINT) {
		assert(trans->base.box.depth == 1);

		trans->base.stride = trans->base.box.width * rsc->cpp * 2;
		trans->staging = malloc(trans->base.stride * trans->base.box.height);
		if (!trans->staging)
			goto fail;

		/* if we're not discarding the whole range (or resource), we must copy
		 * the real data in.
		 */
		if (!(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
					PIPE_TRANSFER_DISCARD_RANGE))) {
			struct fd_resource_slice *sslice =
				fd_resource_slice(rsc->stencil, level);
			void *sbuf = fd_bo_map(rsc->stencil->bo);
			if (!sbuf)
				goto fail;

			float *depth = (float *)(buf + slice->offset +
					fd_resource_layer_offset(rsc, slice, box->z) +
					box->y * slice->pitch * 4 + box->x * 4);
			uint8_t *stencil = sbuf + sslice->offset +
					fd_resource_layer_offset(rsc->stencil, sslice, box->z) +
					box->y * sslice->pitch + box->x;

			if (format != PIPE_FORMAT_X32_S8X24_UINT)
				util_format_z32_float_s8x24_uint_pack_z_float(
						trans->staging, trans->base.stride,
						depth, slice->pitch * 4,
						box->width, box->height);

			util_format_z32_float_s8x24_uint_pack_s_8uint(
					trans->staging, trans->base.stride,
					stencil, sslice->pitch,
					box->width, box->height);
		}

		buf = trans->staging;
		offset = 0;
	} else if (rsc->internal_format != format &&
			util_format_description(format)->layout == UTIL_FORMAT_LAYOUT_RGTC) {
		assert(trans->base.box.depth == 1);

		trans->base.stride = util_format_get_stride(
				format, trans->base.box.width);
		trans->staging = malloc(
				util_format_get_2d_size(format, trans->base.stride,
						trans->base.box.height));
		if (!trans->staging)
			goto fail;

		/* if we're not discarding the whole range (or resource), we must copy
		 * the real data in.
		 */
		if (!(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
					PIPE_TRANSFER_DISCARD_RANGE))) {
			uint8_t *rgba8 = (uint8_t *)buf + slice->offset +
					fd_resource_layer_offset(rsc, slice, box->z) +
					box->y * slice->pitch * rsc->cpp + box->x * rsc->cpp;

			switch (format) {
			case PIPE_FORMAT_RGTC1_UNORM:
			case PIPE_FORMAT_RGTC1_SNORM:
			case PIPE_FORMAT_LATC1_UNORM:
			case PIPE_FORMAT_LATC1_SNORM:
				util_format_rgtc1_unorm_pack_rgba_8unorm(
						trans->staging, trans->base.stride,
						rgba8, slice->pitch * rsc->cpp,
						box->width, box->height);
				break;
			case PIPE_FORMAT_RGTC2_UNORM:
			case PIPE_FORMAT_RGTC2_SNORM:
			case PIPE_FORMAT_LATC2_UNORM:
			case PIPE_FORMAT_LATC2_SNORM:
				util_format_rgtc2_unorm_pack_rgba_8unorm(
						trans->staging, trans->base.stride,
						rgba8, slice->pitch * rsc->cpp,
						box->width, box->height);
				break;
			default:
				assert(!"Unexpected format");
				break;
			}
		}

		buf = trans->staging;
		offset = 0;
	}

	*pptrans = ptrans;

	return buf + offset;

fail:
	fd_resource_transfer_unmap(pctx, ptrans);
	return NULL;
}
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned i, prims, buffers = 0;

	/* if we supported transform feedback, we'd have to disable this: */
	if (((scissor->maxx - scissor->minx) *
			(scissor->maxy - scissor->miny)) == 0) {
		return;
	}

	/* TODO: push down the region versions into the tiles */
	if (!fd_render_condition_check(pctx))
		return;

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		if (ctx->streamout.num_targets > 0)
			debug_error("stream-out with emulated prims");
		util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	ctx->needs_flush = true;

	/*
	 * Figure out the buffers/features we need:
	 */

	if (fd_depth_enabled(ctx)) {
		buffers |= FD_BUFFER_DEPTH;
		resource_written(ctx, pfb->zsbuf->texture);
		ctx->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
	}

	if (fd_stencil_enabled(ctx)) {
		buffers |= FD_BUFFER_STENCIL;
		resource_written(ctx, pfb->zsbuf->texture);
		ctx->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
	}

	if (fd_logicop_enabled(ctx))
		ctx->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		resource_written(ctx, surf);
		buffers |= PIPE_CLEAR_COLOR0 << i;

		if (surf->nr_samples > 1)
			ctx->gmem_reason |= FD_GMEM_MSAA_ENABLED;

		if (fd_blend_enabled(ctx, i))
			ctx->gmem_reason |= FD_GMEM_BLEND_ENABLED;
	}

	/* Skip over buffer 0, that is sent along with the command stream */
	for (i = 1; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
		resource_read(ctx, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
		resource_read(ctx, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
	}

	/* Mark VBOs as being read */
	for (i = 0; i < ctx->vtx.vertexbuf.count; i++) {
		assert(!ctx->vtx.vertexbuf.vb[i].user_buffer);
		resource_read(ctx, ctx->vtx.vertexbuf.vb[i].buffer);
	}

	/* Mark index buffer as being read */
	resource_read(ctx, ctx->indexbuf.buffer);

	/* Mark textures as being read */
	for (i = 0; i < ctx->verttex.num_textures; i++)
		if (ctx->verttex.textures[i])
			resource_read(ctx, ctx->verttex.textures[i]->texture);
	for (i = 0; i < ctx->fragtex.num_textures; i++)
		if (ctx->fragtex.textures[i])
			resource_read(ctx, ctx->fragtex.textures[i]->texture);

	/* Mark streamout buffers as being written.. */
	for (i = 0; i < ctx->streamout.num_targets; i++)
		if (ctx->streamout.targets[i])
			resource_written(ctx, ctx->streamout.targets[i]->buffer);

	ctx->num_draws++;

	prims = u_reduced_prims_for_vertices(info->mode, info->count);

	ctx->stats.draw_calls++;

	/* TODO prims_emitted should be clipped when the stream-out buffer is
	 * not large enough.  See max_tf_vtx().. probably need to move that
	 * into common code.  Although a bit more annoying since a2xx doesn't
	 * use ir3 so no common way to get at the pipe_stream_output_info
	 * which is needed for this calculation.
	 */
	if (ctx->streamout.num_targets > 0)
		ctx->stats.prims_emitted += prims;
	ctx->stats.prims_generated += prims;

	/* any buffers that haven't been cleared yet, we need to restore: */
	ctx->restore |= buffers & (FD_BUFFER_ALL & ~ctx->cleared);
	/* and any buffers used, need to be resolved: */
	ctx->resolve |= buffers;

	DBG("%x num_draws=%u (%s/%s)", buffers, ctx->num_draws,
			util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
			util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_DRAW);
	ctx->draw_vbo(ctx, info);

	for (i = 0; i < ctx->streamout.num_targets; i++)
		ctx->streamout.offsets[i] += info->count;

	if (fd_mesa_debug & FD_DBG_DDRAW)
		ctx->dirty = 0xffffffff;

	/* if an app (or, well, piglit test) does many thousands of draws
	 * without flush (or anything which implicitly flushes, like
	 * changing render targets), we can exceed the ringbuffer size.
	 * Since we don't currently have a sane way to wrap around, and
	 * we use the same buffer for both draw and tiling commands, for
	 * now we need to do this hack and trigger flush if we are running
	 * low on remaining space for cmds:
	 */
	if (((ctx->ring->cur - ctx->ring->start) >
			(ctx->ring->size/4 - FD_TILING_COMMANDS_DWORDS)) ||
			(fd_mesa_debug & FD_DBG_FLUSH))
		fd_context_render(pctx);
}
static void *
fd_resource_transfer_map(struct pipe_context *pctx,
		struct pipe_resource *prsc,
		unsigned level, unsigned usage,
		const struct pipe_box *box,
		struct pipe_transfer **pptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(prsc);
	struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
	struct pipe_transfer *ptrans;
	enum pipe_format format = prsc->format;
	uint32_t op = 0;
	uint32_t offset;
	char *buf;
	int ret = 0;

	DBG("prsc=%p, level=%u, usage=%x", prsc, level, usage);

	ptrans = util_slab_alloc(&ctx->transfer_pool);
	if (!ptrans)
		return NULL;

	/* util_slab_alloc() doesn't zero: */
	memset(ptrans, 0, sizeof(*ptrans));

	pipe_resource_reference(&ptrans->resource, prsc);
	ptrans->level = level;
	ptrans->usage = usage;
	ptrans->box = *box;
	ptrans->stride = slice->pitch * rsc->cpp;
	ptrans->layer_stride = slice->size0;

	if (usage & PIPE_TRANSFER_READ)
		op |= DRM_FREEDRENO_PREP_READ;

	if (usage & PIPE_TRANSFER_WRITE)
		op |= DRM_FREEDRENO_PREP_WRITE;

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
		realloc_bo(rsc, fd_bo_size(rsc->bo));
		fd_invalidate_resource(ctx, prsc);
	} else if ((usage & PIPE_TRANSFER_WRITE) &&
			prsc->target == PIPE_BUFFER &&
			!util_ranges_intersect(&rsc->valid_buffer_range,
					box->x, box->x + box->width)) {
		/* We are trying to write to a previously uninitialized range. No need
		 * to wait.
		 */
	} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		/* If the GPU is writing to the resource, or if it is reading from the
		 * resource and we're trying to write to it, flush the renders.
		 */
		if (rsc->dirty ||
				((ptrans->usage & PIPE_TRANSFER_WRITE) && rsc->reading))
			fd_context_render(pctx);

		/* The GPU keeps track of how the various bo's are being used, and
		 * will wait if necessary for the proper operation to have
		 * completed.
		 */
		ret = fd_bo_cpu_prep(rsc->bo, ctx->screen->pipe, op);
		if (ret)
			goto fail;
	}

	buf = fd_bo_map(rsc->bo);
	if (!buf) {
		fd_resource_transfer_unmap(pctx, ptrans);
		return NULL;
	}

	*pptrans = ptrans;

	if (rsc->layer_first) {
		offset = slice->offset +
			box->y / util_format_get_blockheight(format) * ptrans->stride +
			box->x / util_format_get_blockwidth(format) * rsc->cpp +
			box->z * rsc->layer_size;
	} else {
		offset = slice->offset +
			box->y / util_format_get_blockheight(format) * ptrans->stride +
			box->x / util_format_get_blockwidth(format) * rsc->cpp +
			box->z * slice->size0;
	}

	return buf + offset;

fail:
	fd_resource_transfer_unmap(pctx, ptrans);
	return NULL;
}
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned i, buffers = 0;

	/* if we supported transform feedback, we'd have to disable this: */
	if (((scissor->maxx - scissor->minx) *
			(scissor->maxy - scissor->miny)) == 0) {
		return;
	}

	/* emulate unsupported primitives: */
	if (!fd_supported_prim(ctx, info->mode)) {
		util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
		util_primconvert_draw_vbo(ctx->primconvert, info);
		return;
	}

	ctx->needs_flush = true;

	/*
	 * Figure out the buffers/features we need:
	 */

	if (fd_depth_enabled(ctx)) {
		buffers |= FD_BUFFER_DEPTH;
		fd_resource(pfb->zsbuf->texture)->dirty = true;
		ctx->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
	}

	if (fd_stencil_enabled(ctx)) {
		buffers |= FD_BUFFER_STENCIL;
		fd_resource(pfb->zsbuf->texture)->dirty = true;
		ctx->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
	}

	if (fd_logicop_enabled(ctx))
		ctx->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;

	for (i = 0; i < pfb->nr_cbufs; i++) {
		struct pipe_resource *surf;

		if (!pfb->cbufs[i])
			continue;

		surf = pfb->cbufs[i]->texture;

		fd_resource(surf)->dirty = true;
		buffers |= FD_BUFFER_COLOR;

		if (surf->nr_samples > 1)
			ctx->gmem_reason |= FD_GMEM_MSAA_ENABLED;

		if (fd_blend_enabled(ctx, i))
			ctx->gmem_reason |= FD_GMEM_BLEND_ENABLED;
	}

	ctx->num_draws++;

	ctx->stats.draw_calls++;
	ctx->stats.prims_emitted +=
		u_reduced_prims_for_vertices(info->mode, info->count);

	/* any buffers that haven't been cleared yet, we need to restore: */
	ctx->restore |= buffers & (FD_BUFFER_ALL & ~ctx->cleared);
	/* and any buffers used, need to be resolved: */
	ctx->resolve |= buffers;

	DBG("%x num_draws=%u (%s/%s)", buffers, ctx->num_draws,
			util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
			util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_DRAW);
	ctx->draw_vbo(ctx, info);

	/* if an app (or, well, piglit test) does many thousands of draws
	 * without flush (or anything which implicitly flushes, like
	 * changing render targets), we can exceed the ringbuffer size.
	 * Since we don't currently have a sane way to wrap around, and
	 * we use the same buffer for both draw and tiling commands, for
	 * now we need to do this hack and trigger flush if we are running
	 * low on remaining space for cmds:
	 */
	if (((ctx->ring->cur - ctx->ring->start) >
			(ctx->ring->size/4 - FD_TILING_COMMANDS_DWORDS)) ||
			(fd_mesa_debug & FD_DBG_FLUSH))
		fd_context_render(pctx);
}