/* Flush a mapped Z32F_S8 transfer: the interleaved Z32_FLOAT_S8X24_UINT
 * data staged in trans->staging is split back into the two separate
 * backing BOs -- float depth into rsc->bo and 8-bit stencil into
 * rsc->stencil->bo.  'box' is relative to the original transfer box.
 * NOTE(review): pointer arithmetic on fd_bo_map()'s void* return relies
 * on the GCC void*-arithmetic extension used throughout this driver.
 */
static void fd_resource_flush_z32s8(struct fd_transfer *trans, const struct pipe_box *box)
{
	struct fd_resource *rsc = fd_resource(trans->base.resource);
	struct fd_resource_slice *slice = fd_resource_slice(rsc, trans->base.level);
	struct fd_resource_slice *sslice = fd_resource_slice(rsc->stencil, trans->base.level);
	enum pipe_format format = trans->base.resource->format;

	/* depth plane: 4 bytes/pixel, hence the '* 4' pitch/column scaling */
	float *depth = fd_bo_map(rsc->bo) + slice->offset +
		fd_resource_layer_offset(rsc, slice, trans->base.box.z) +
		(trans->base.box.y + box->y) * slice->pitch * 4 +
		(trans->base.box.x + box->x) * 4;
	/* stencil plane: 1 byte/pixel in the separate stencil resource */
	uint8_t *stencil = fd_bo_map(rsc->stencil->bo) + sslice->offset +
		fd_resource_layer_offset(rsc->stencil, sslice, trans->base.box.z) +
		(trans->base.box.y + box->y) * sslice->pitch +
		trans->base.box.x + box->x;

	/* X32_S8X24 means the depth channel is don't-care, so skip it */
	if (format != PIPE_FORMAT_X32_S8X24_UINT)
		util_format_z32_float_s8x24_uint_unpack_z_float(
				depth, slice->pitch * 4,
				trans->staging, trans->base.stride,
				box->width, box->height);

	util_format_z32_float_s8x24_uint_unpack_s_8uint(
			stencil, sslice->pitch,
			trans->staging, trans->base.stride,
			box->width, box->height);
}
static void assemble_variant(struct ir3_shader_variant *v) { struct fd_context *ctx = fd_context(v->shader->pctx); uint32_t sz, *bin; bin = ir3_assemble(v->ir, &v->info, ctx->screen->gpu_id); sz = v->info.sizedwords * 4; v->bo = fd_bo_new(ctx->dev, sz, DRM_FREEDRENO_GEM_CACHE_WCOMBINE | DRM_FREEDRENO_GEM_TYPE_KMEM); memcpy(fd_bo_map(v->bo), bin, sz); free(bin); if (ctx->screen->gpu_id >= 400) { v->instrlen = v->info.sizedwords / (2 * 16); } else { v->instrlen = v->info.sizedwords / (2 * 4); } /* NOTE: if relative addressing is used, we set constlen in * the compiler (to worst-case value) since we don't know in * the assembler what the max addr reg value can be: */ v->constlen = MAX2(v->constlen, v->info.max_const + 1); /* no need to keep the ir around beyond this point: */ ir3_destroy(v->ir); v->ir = NULL; }
static void * fd_resource_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc, unsigned level, unsigned usage, const struct pipe_box *box, struct pipe_transfer **pptrans) { struct fd_context *ctx = fd_context(pctx); struct fd_resource *rsc = fd_resource(prsc); struct pipe_transfer *ptrans = util_slab_alloc(&ctx->transfer_pool); enum pipe_format format = prsc->format; char *buf; if (!ptrans) return NULL; ptrans->resource = prsc; ptrans->level = level; ptrans->usage = usage; ptrans->box = *box; ptrans->stride = rsc->pitch * rsc->cpp; ptrans->layer_stride = ptrans->stride; buf = fd_bo_map(rsc->bo); *pptrans = ptrans; return buf + box->y / util_format_get_blockheight(format) * ptrans->stride + box->x / util_format_get_blockwidth(format) * rsc->cpp; }
static void emit_shader(struct fd_ringbuffer *ring, struct fd3_shader_stateobj *so) { struct ir3_shader_info *si = &so->info; enum adreno_state_block sb; uint32_t i, *bin; if (so->type == SHADER_VERTEX) { sb = SB_VERT_SHADER; } else { sb = SB_FRAG_SHADER; } // XXX use SS_INDIRECT bin = fd_bo_map(so->bo); OUT_PKT3(ring, CP_LOAD_STATE, 2 + si->sizedwords); OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) | CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) | CP_LOAD_STATE_0_STATE_BLOCK(sb) | CP_LOAD_STATE_0_NUM_UNIT(so->instrlen)); OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) | CP_LOAD_STATE_1_EXT_SRC_ADDR(0)); for (i = 0; i < si->sizedwords; i++) OUT_RING(ring, bin[i]); }
/* Emit bound user constant buffers, then the shader's immediate values,
 * via CP_SET_CONSTANT packets starting at register offset 'base'.
 * 'shader' may be NULL, in which case only the user constants are
 * emitted.  Immediates always land at start_base + first_immediate*4,
 * independent of how many user constants were emitted before them.
 */
static void emit_constants(struct fd_ringbuffer *ring, uint32_t base,
		struct fd_constbuf_stateobj *constbuf,
		struct fd2_shader_stateobj *shader)
{
	uint32_t enabled_mask = constbuf->enabled_mask;
	uint32_t start_base = base;   /* remember where consts start, for immediates */
	unsigned i;

	/* emit user constants: */
	while (enabled_mask) {
		unsigned index = ffs(enabled_mask) - 1;
		struct pipe_constant_buffer *cb = &constbuf->cb[index];
		unsigned size = align(cb->buffer_size, 4) / 4; /* size in dwords */

		// I expect that size should be a multiple of vec4's:
		assert(size == align(size, 4));

		/* hmm, sometimes we still seem to end up with consts bound,
		 * even if shader isn't using them, which ends up overwriting
		 * const reg's used for immediates.. this is a hack to work
		 * around that:
		 */
		if (shader && ((base - start_base) >= (shader->first_immediate * 4)))
			break;

		const uint32_t *dwords;

		/* constants may come from a user pointer or a GEM buffer: */
		if (cb->user_buffer) {
			dwords = cb->user_buffer;
		} else {
			struct fd_resource *rsc = fd_resource(cb->buffer);
			dwords = fd_bo_map(rsc->bo);
		}

		dwords = (uint32_t *)(((uint8_t *)dwords) + cb->buffer_offset);

		OUT_PKT3(ring, CP_SET_CONSTANT, size + 1);
		OUT_RING(ring, base);
		for (i = 0; i < size; i++)
			OUT_RING(ring, *(dwords++));

		base += size;
		enabled_mask &= ~(1 << index);
	}

	/* emit shader immediates: */
	if (shader) {
		for (i = 0; i < shader->num_immediates; i++) {
			/* one vec4 (4 dwords) per immediate: */
			OUT_PKT3(ring, CP_SET_CONSTANT, 5);
			OUT_RING(ring, start_base + (4 * (shader->first_immediate + i)));
			OUT_RING(ring, shader->immediates[i].val[0]);
			OUT_RING(ring, shader->immediates[i].val[1]);
			OUT_RING(ring, shader->immediates[i].val[2]);
			OUT_RING(ring, shader->immediates[i].val[3]);
			base += 4;
		}
	}
}
static struct fd3_shader_stateobj * create_shader(struct pipe_context *pctx, const struct pipe_shader_state *cso, enum shader_t type) { struct fd3_shader_stateobj *so = CALLOC_STRUCT(fd3_shader_stateobj); int ret; if (!so) return NULL; so->type = type; if (fd_mesa_debug & FD_DBG_DISASM) { DBG("dump tgsi: type=%d", so->type); tgsi_dump(cso->tokens, 0); } if (type == SHADER_FRAGMENT) { /* we seem to get wrong colors (maybe swap/endianess or hw issue?) * with full precision color reg. And blob driver only seems to * use half precision register for color output (that I can find * so far), even with highp precision. So for force half precision * for frag shader: */ so->half_precision = true; } ret = fd3_compile_shader(so, cso->tokens); if (ret) { debug_error("compile failed!"); goto fail; } assemble_shader(pctx, so); if (!so->bo) { debug_error("assemble failed!"); goto fail; } if (type == SHADER_VERTEX) fixup_vp_regfootprint(so); if (fd_mesa_debug & FD_DBG_DISASM) { DBG("disassemble: type=%d", so->type); disasm_a3xx(fd_bo_map(so->bo), so->info.sizedwords, 0, so->type); } return so; fail: delete_shader(so); return NULL; }
static void * fd_resource_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc, unsigned level, unsigned usage, const struct pipe_box *box, struct pipe_transfer **pptrans) { struct fd_context *ctx = fd_context(pctx); struct fd_resource *rsc = fd_resource(prsc); struct pipe_transfer *ptrans = util_slab_alloc(&ctx->transfer_pool); enum pipe_format format = prsc->format; uint32_t op = 0; char *buf; if (!ptrans) return NULL; /* util_slab_alloc() doesn't zero: */ memset(ptrans, 0, sizeof(*ptrans)); pipe_resource_reference(&ptrans->resource, prsc); ptrans->level = level; ptrans->usage = usage; ptrans->box = *box; ptrans->stride = rsc->pitch * rsc->cpp; ptrans->layer_stride = ptrans->stride; /* some state trackers (at least XA) don't do this.. */ if (!(usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) fd_resource_transfer_flush_region(pctx, ptrans, box); buf = fd_bo_map(rsc->bo); if (!buf) { fd_resource_transfer_unmap(pctx, ptrans); return NULL; } if (usage & PIPE_TRANSFER_READ) op |= DRM_FREEDRENO_PREP_READ; if (usage & PIPE_TRANSFER_WRITE) op |= DRM_FREEDRENO_PREP_WRITE; if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) fd_bo_cpu_prep(rsc->bo, ctx->screen->pipe, op); *pptrans = ptrans; return buf + box->y / util_format_get_blockheight(format) * ptrans->stride + box->x / util_format_get_blockwidth(format) * rsc->cpp; }
static void emit_shader(struct fd_ringbuffer *ring, const struct ir3_shader_variant *so) { const struct ir3_info *si = &so->info; enum adreno_state_block sb; enum adreno_state_src src; uint32_t i, sz, *bin; if (so->type == SHADER_VERTEX) { sb = SB_VERT_SHADER; } else { sb = SB_FRAG_SHADER; } if (fd_mesa_debug & FD_DBG_DIRECT) { sz = si->sizedwords; src = SS_DIRECT; bin = fd_bo_map(so->bo); } else { sz = 0; src = 2; // enums different on a5xx.. bin = NULL; } OUT_PKT7(ring, CP_LOAD_STATE, 3 + sz); OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) | CP_LOAD_STATE_0_STATE_SRC(src) | CP_LOAD_STATE_0_STATE_BLOCK(sb) | CP_LOAD_STATE_0_NUM_UNIT(so->instrlen)); if (bin) { OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) | CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER)); OUT_RING(ring, CP_LOAD_STATE_2_EXT_SRC_ADDR_HI(0)); } else { OUT_RELOC(ring, so->bo, 0, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER), 0); } /* for how clever coverity is, it is sometimes rather dull, and * doesn't realize that the only case where bin==NULL, sz==0: */ assume(bin || (sz == 0)); for (i = 0; i < sz; i++) { OUT_RING(ring, bin[i]); } }
static void assemble_shader(struct pipe_context *pctx, struct fd3_shader_stateobj *so) { struct fd_context *ctx = fd_context(pctx); uint32_t sz, *bin; bin = ir3_shader_assemble(so->ir, &so->info); sz = so->info.sizedwords * 4; so->bo = fd_bo_new(ctx->screen->dev, sz, DRM_FREEDRENO_GEM_CACHE_WCOMBINE | DRM_FREEDRENO_GEM_TYPE_KMEM); memcpy(fd_bo_map(so->bo), bin, sz); free(bin); so->instrlen = so->info.sizedwords / 8; so->constlen = so->info.max_const + 1; }
static void emit_shader(struct fd_ringbuffer *ring, const struct ir3_shader_variant *so) { const struct ir3_info *si = &so->info; enum adreno_state_block sb; enum adreno_state_src src; uint32_t i, sz, *bin; if (so->type == SHADER_VERTEX) { sb = SB_VERT_SHADER; } else { sb = SB_FRAG_SHADER; } if (fd_mesa_debug & FD_DBG_DIRECT) { sz = si->sizedwords; src = SS_DIRECT; bin = fd_bo_map(so->bo); } else { sz = 0; src = 2; // enums different on a4xx.. bin = NULL; } OUT_PKT3(ring, CP_LOAD_STATE, 2 + sz); OUT_RING(ring, CP_LOAD_STATE_0_DST_OFF(0) | CP_LOAD_STATE_0_STATE_SRC(src) | CP_LOAD_STATE_0_STATE_BLOCK(sb) | CP_LOAD_STATE_0_NUM_UNIT(so->instrlen)); if (bin) { OUT_RING(ring, CP_LOAD_STATE_1_EXT_SRC_ADDR(0) | CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER)); } else { OUT_RELOC(ring, so->bo, 0, CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER), 0); } for (i = 0; i < sz; i++) { OUT_RING(ring, bin[i]); } }
/* Flush a mapped RGTC transfer: convert the compressed staging data in
 * trans->staging to the uncompressed internal format stored in rsc->bo.
 * 'box' is relative to the original transfer box.
 * NOTE(review): the *_unorm_unpack_* helpers are used for the SNORM and
 * LATC variants too -- presumably intentional raw-bit handling; confirm
 * against the util_format RGTC helpers.
 */
static void fd_resource_flush_rgtc(struct fd_transfer *trans, const struct pipe_box *box)
{
	struct fd_resource *rsc = fd_resource(trans->base.resource);
	struct fd_resource_slice *slice = fd_resource_slice(rsc, trans->base.level);
	enum pipe_format format = trans->base.resource->format;

	/* destination: uncompressed pixels in the backing BO */
	uint8_t *data = fd_bo_map(rsc->bo) + slice->offset +
		fd_resource_layer_offset(rsc, slice, trans->base.box.z) +
		((trans->base.box.y + box->y) * slice->pitch +
		 trans->base.box.x + box->x) * rsc->cpp;

	/* source: compressed blocks within the staging buffer */
	uint8_t *source = trans->staging +
		util_format_get_nblocksy(format, box->y) * trans->base.stride +
		util_format_get_stride(format, box->x);

	switch (format) {
	case PIPE_FORMAT_RGTC1_UNORM:
	case PIPE_FORMAT_RGTC1_SNORM:
	case PIPE_FORMAT_LATC1_UNORM:
	case PIPE_FORMAT_LATC1_SNORM:
		util_format_rgtc1_unorm_unpack_rgba_8unorm(
			data, slice->pitch * rsc->cpp,
			source, trans->base.stride,
			box->width, box->height);
		break;
	case PIPE_FORMAT_RGTC2_UNORM:
	case PIPE_FORMAT_RGTC2_SNORM:
	case PIPE_FORMAT_LATC2_UNORM:
	case PIPE_FORMAT_LATC2_SNORM:
		util_format_rgtc2_unorm_unpack_rgba_8unorm(
			data, slice->pitch * rsc->cpp,
			source, trans->base.stride,
			box->width, box->height);
		break;
	default:
		assert(!"Unexpected format\n");
		break;
	}
}
static void assemble_variant(struct ir3_shader_variant *v) { struct fd_context *ctx = fd_context(v->shader->pctx); uint32_t sz, *bin; bin = ir3_assemble(v->ir, &v->info); sz = v->info.sizedwords * 4; v->bo = fd_bo_new(ctx->dev, sz, DRM_FREEDRENO_GEM_CACHE_WCOMBINE | DRM_FREEDRENO_GEM_TYPE_KMEM); memcpy(fd_bo_map(v->bo), bin, sz); free(bin); v->instrlen = v->info.sizedwords / 8; /* NOTE: if relative addressing is used, we set constlen in * the compiler (to worst-case value) since we don't know in * the assembler what the max addr reg value can be: */ v->constlen = MAX2(v->constlen, v->info.max_const + 1); }
/* Map a resource for CPU access.  Handles GPU synchronization (or BO
 * reallocation on whole-resource discard), and for formats the hardware
 * stores differently than the API format (Z32F_S8 split depth/stencil,
 * RGTC stored uncompressed) returns a malloc'd staging buffer instead of
 * the BO mapping; the staging data is written back at flush/unmap time.
 * Returns NULL on failure.
 */
static void *
fd_resource_transfer_map(struct pipe_context *pctx,
		struct pipe_resource *prsc,
		unsigned level, unsigned usage,
		const struct pipe_box *box,
		struct pipe_transfer **pptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(prsc);
	struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
	struct fd_transfer *trans;
	struct pipe_transfer *ptrans;
	enum pipe_format format = prsc->format;
	uint32_t op = 0;
	uint32_t offset;
	char *buf;
	int ret = 0;

	DBG("prsc=%p, level=%u, usage=%x, box=%dx%d+%d,%d", prsc, level, usage,
		box->width, box->height, box->x, box->y);

	ptrans = util_slab_alloc(&ctx->transfer_pool);
	if (!ptrans)
		return NULL;

	/* util_slab_alloc() doesn't zero: */
	trans = fd_transfer(ptrans);
	memset(trans, 0, sizeof(*trans));

	pipe_resource_reference(&ptrans->resource, prsc);
	ptrans->level = level;
	ptrans->usage = usage;
	ptrans->box = *box;
	ptrans->stride = util_format_get_nblocksx(format, slice->pitch) * rsc->cpp;
	ptrans->layer_stride = rsc->layer_first ? rsc->layer_size : slice->size0;

	if (usage & PIPE_TRANSFER_READ)
		op |= DRM_FREEDRENO_PREP_READ;

	if (usage & PIPE_TRANSFER_WRITE)
		op |= DRM_FREEDRENO_PREP_WRITE;

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
		/* discard: swap in fresh BO(s) instead of stalling on the GPU */
		realloc_bo(rsc, fd_bo_size(rsc->bo));
		if (rsc->stencil)
			realloc_bo(rsc->stencil, fd_bo_size(rsc->stencil->bo));
		fd_invalidate_resource(ctx, prsc);
	} else if ((usage & PIPE_TRANSFER_WRITE) &&
			prsc->target == PIPE_BUFFER &&
			!util_ranges_intersect(&rsc->valid_buffer_range,
					box->x, box->x + box->width)) {
		/* We are trying to write to a previously uninitialized range. No need
		 * to wait.
		 */
	} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		/* If the GPU is writing to the resource, or if it is reading from the
		 * resource and we're trying to write to it, flush the renders.
		 */
		if (((ptrans->usage & PIPE_TRANSFER_WRITE) &&
					pending(rsc, FD_PENDING_READ | FD_PENDING_WRITE)) ||
				pending(rsc, FD_PENDING_WRITE))
			fd_context_render(pctx);

		/* The GPU keeps track of how the various bo's are being used, and
		 * will wait if necessary for the proper operation to have
		 * completed.
		 */
		ret = fd_bo_cpu_prep(rsc->bo, ctx->screen->pipe, op);
		if (ret)
			goto fail;
	}

	buf = fd_bo_map(rsc->bo);
	if (!buf)
		goto fail;

	offset = slice->offset +
		box->y / util_format_get_blockheight(format) * ptrans->stride +
		box->x / util_format_get_blockwidth(format) * rsc->cpp +
		fd_resource_layer_offset(rsc, slice, box->z);

	if (prsc->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT ||
			prsc->format == PIPE_FORMAT_X32_S8X24_UINT) {
		/* interleave depth + stencil into a staging buffer (8 bytes/pixel) */
		assert(trans->base.box.depth == 1);

		trans->base.stride = trans->base.box.width * rsc->cpp * 2;
		trans->staging = malloc(trans->base.stride * trans->base.box.height);
		if (!trans->staging)
			goto fail;

		/* if we're not discarding the whole range (or resource), we must copy
		 * the real data in.
		 */
		if (!(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
					PIPE_TRANSFER_DISCARD_RANGE))) {
			struct fd_resource_slice *sslice =
				fd_resource_slice(rsc->stencil, level);
			void *sbuf = fd_bo_map(rsc->stencil->bo);
			if (!sbuf)
				goto fail;

			float *depth = (float *)(buf + slice->offset +
				fd_resource_layer_offset(rsc, slice, box->z) +
				box->y * slice->pitch * 4 + box->x * 4);
			uint8_t *stencil = sbuf + sslice->offset +
				fd_resource_layer_offset(rsc->stencil, sslice, box->z) +
				box->y * sslice->pitch + box->x;

			/* X32_S8X24 has a don't-care depth channel */
			if (format != PIPE_FORMAT_X32_S8X24_UINT)
				util_format_z32_float_s8x24_uint_pack_z_float(
						trans->staging, trans->base.stride,
						depth, slice->pitch * 4,
						box->width, box->height);

			util_format_z32_float_s8x24_uint_pack_s_8uint(
					trans->staging, trans->base.stride,
					stencil, sslice->pitch,
					box->width, box->height);
		}

		buf = trans->staging;
		offset = 0;
	} else if (rsc->internal_format != format &&
			util_format_description(format)->layout == UTIL_FORMAT_LAYOUT_RGTC) {
		/* hardware stores RGTC uncompressed; stage compressed blocks */
		assert(trans->base.box.depth == 1);

		trans->base.stride = util_format_get_stride(
				format, trans->base.box.width);
		trans->staging = malloc(
				util_format_get_2d_size(format, trans->base.stride,
					trans->base.box.height));
		if (!trans->staging)
			goto fail;

		/* if we're not discarding the whole range (or resource), we must copy
		 * the real data in.
		 */
		if (!(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
					PIPE_TRANSFER_DISCARD_RANGE))) {
			uint8_t *rgba8 = (uint8_t *)buf + slice->offset +
				fd_resource_layer_offset(rsc, slice, box->z) +
				box->y * slice->pitch * rsc->cpp + box->x * rsc->cpp;

			switch (format) {
			case PIPE_FORMAT_RGTC1_UNORM:
			case PIPE_FORMAT_RGTC1_SNORM:
			case PIPE_FORMAT_LATC1_UNORM:
			case PIPE_FORMAT_LATC1_SNORM:
				util_format_rgtc1_unorm_pack_rgba_8unorm(
					trans->staging, trans->base.stride,
					rgba8, slice->pitch * rsc->cpp,
					box->width, box->height);
				break;
			case PIPE_FORMAT_RGTC2_UNORM:
			case PIPE_FORMAT_RGTC2_SNORM:
			case PIPE_FORMAT_LATC2_UNORM:
			case PIPE_FORMAT_LATC2_SNORM:
				util_format_rgtc2_unorm_pack_rgba_8unorm(
					trans->staging, trans->base.stride,
					rgba8, slice->pitch * rsc->cpp,
					box->width, box->height);
				break;
			default:
				assert(!"Unexpected format");
				break;
			}
		}

		buf = trans->staging;
		offset = 0;
	}

	*pptrans = ptrans;

	return buf + offset;

fail:
	fd_resource_transfer_unmap(pctx, ptrans);
	return NULL;
}
void ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring, struct fd_context *ctx, const struct pipe_draw_info *info) { debug_assert(v->type == MESA_SHADER_VERTEX); emit_common_consts(v, ring, ctx, PIPE_SHADER_VERTEX); /* emit driver params every time: */ /* TODO skip emit if shader doesn't use driver params to avoid WFI.. */ if (info) { const struct ir3_const_state *const_state = &v->shader->const_state; uint32_t offset = const_state->offsets.driver_param; if (v->constlen > offset) { uint32_t vertex_params[IR3_DP_VS_COUNT] = { [IR3_DP_VTXID_BASE] = info->index_size ? info->index_bias : info->start, [IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v), }; /* if no user-clip-planes, we don't need to emit the * entire thing: */ uint32_t vertex_params_size = 4; if (v->key.ucp_enables) { struct pipe_clip_state *ucp = &ctx->ucp; unsigned pos = IR3_DP_UCP0_X; for (unsigned i = 0; pos <= IR3_DP_UCP7_W; i++) { for (unsigned j = 0; j < 4; j++) { vertex_params[pos] = fui(ucp->ucp[i][j]); pos++; } } vertex_params_size = ARRAY_SIZE(vertex_params); } ring_wfi(ctx->batch, ring); bool needs_vtxid_base = ir3_find_sysval_regid(v, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) != regid(63, 0); /* for indirect draw, we need to copy VTXID_BASE from * indirect-draw parameters buffer.. which is annoying * and means we can't easily emit these consts in cmd * stream so need to copy them to bo. 
*/ if (info->indirect && needs_vtxid_base) { struct pipe_draw_indirect_info *indirect = info->indirect; struct pipe_resource *vertex_params_rsc = pipe_buffer_create(&ctx->screen->base, PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM, vertex_params_size * 4); unsigned src_off = info->indirect->offset;; void *ptr; ptr = fd_bo_map(fd_resource(vertex_params_rsc)->bo); memcpy(ptr, vertex_params, vertex_params_size * 4); if (info->index_size) { /* indexed draw, index_bias is 4th field: */ src_off += 3 * 4; } else { /* non-indexed draw, start is 3rd field: */ src_off += 2 * 4; } /* copy index_bias or start from draw params: */ ctx->mem_to_mem(ring, vertex_params_rsc, 0, indirect->buffer, src_off, 1); ctx->emit_const(ring, MESA_SHADER_VERTEX, offset * 4, 0, vertex_params_size, NULL, vertex_params_rsc); pipe_resource_reference(&vertex_params_rsc, NULL); } else { ctx->emit_const(ring, MESA_SHADER_VERTEX, offset * 4, 0, vertex_params_size, vertex_params, NULL); } /* if needed, emit stream-out buffer addresses: */ if (vertex_params[IR3_DP_VTXCNT_MAX] > 0) { emit_tfbos(ctx, v, ring); } } }
static void * fd_resource_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc, unsigned level, unsigned usage, const struct pipe_box *box, struct pipe_transfer **pptrans) { struct fd_context *ctx = fd_context(pctx); struct fd_resource *rsc = fd_resource(prsc); struct fd_resource_slice *slice = fd_resource_slice(rsc, level); struct pipe_transfer *ptrans; enum pipe_format format = prsc->format; uint32_t op = 0; char *buf; int ret = 0; ptrans = util_slab_alloc(&ctx->transfer_pool); if (!ptrans) return NULL; /* util_slab_alloc() doesn't zero: */ memset(ptrans, 0, sizeof(*ptrans)); pipe_resource_reference(&ptrans->resource, prsc); ptrans->level = level; ptrans->usage = usage; ptrans->box = *box; ptrans->stride = slice->pitch * rsc->cpp; ptrans->layer_stride = slice->size0; if (usage & PIPE_TRANSFER_READ) op |= DRM_FREEDRENO_PREP_READ; if (usage & PIPE_TRANSFER_WRITE) op |= DRM_FREEDRENO_PREP_WRITE; if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) op |= DRM_FREEDRENO_PREP_NOSYNC; /* some state trackers (at least XA) don't do this.. */ if (!(usage & (PIPE_TRANSFER_FLUSH_EXPLICIT | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))) fd_resource_transfer_flush_region(pctx, ptrans, box); if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) { ret = fd_bo_cpu_prep(rsc->bo, ctx->screen->pipe, op); if ((ret == -EBUSY) && (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) realloc_bo(rsc, fd_bo_size(rsc->bo)); else if (ret) goto fail; } buf = fd_bo_map(rsc->bo); if (!buf) { fd_resource_transfer_unmap(pctx, ptrans); return NULL; } *pptrans = ptrans; return buf + slice->offset + box->y / util_format_get_blockheight(format) * ptrans->stride + box->x / util_format_get_blockwidth(format) * rsc->cpp + box->z * slice->size0; fail: fd_resource_transfer_unmap(pctx, ptrans); return NULL; }
/* Map a resource for CPU access.  Whole-resource discard swaps in a
 * fresh BO; otherwise pending GPU work that conflicts with the map is
 * flushed and fd_bo_cpu_prep() waits for completion.  Returns NULL on
 * failure (the transfer is released on all failure paths).
 */
static void *
fd_resource_transfer_map(struct pipe_context *pctx,
		struct pipe_resource *prsc,
		unsigned level, unsigned usage,
		const struct pipe_box *box,
		struct pipe_transfer **pptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(prsc);
	struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
	struct pipe_transfer *ptrans;
	enum pipe_format format = prsc->format;
	uint32_t op = 0;
	uint32_t offset;
	char *buf;
	int ret = 0;

	DBG("prsc=%p, level=%u, usage=%x", prsc, level, usage);

	ptrans = util_slab_alloc(&ctx->transfer_pool);
	if (!ptrans)
		return NULL;

	/* util_slab_alloc() doesn't zero: */
	memset(ptrans, 0, sizeof(*ptrans));

	pipe_resource_reference(&ptrans->resource, prsc);
	ptrans->level = level;
	ptrans->usage = usage;
	ptrans->box = *box;
	ptrans->stride = slice->pitch * rsc->cpp;
	ptrans->layer_stride = slice->size0;

	if (usage & PIPE_TRANSFER_READ)
		op |= DRM_FREEDRENO_PREP_READ;

	if (usage & PIPE_TRANSFER_WRITE)
		op |= DRM_FREEDRENO_PREP_WRITE;

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
		/* discard: new backing store, no need to sync with the GPU */
		realloc_bo(rsc, fd_bo_size(rsc->bo));
		fd_invalidate_resource(ctx, prsc);
	} else if ((usage & PIPE_TRANSFER_WRITE) &&
			prsc->target == PIPE_BUFFER &&
			!util_ranges_intersect(&rsc->valid_buffer_range,
					box->x, box->x + box->width)) {
		/* We are trying to write to a previously uninitialized range. No need
		 * to wait.
		 */
	} else if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		/* If the GPU is writing to the resource, or if it is reading from the
		 * resource and we're trying to write to it, flush the renders.
		 */
		if (rsc->dirty ||
				((ptrans->usage & PIPE_TRANSFER_WRITE) && rsc->reading))
			fd_context_render(pctx);

		/* The GPU keeps track of how the various bo's are being used, and
		 * will wait if necessary for the proper operation to have
		 * completed.
		 */
		ret = fd_bo_cpu_prep(rsc->bo, ctx->screen->pipe, op);
		if (ret)
			goto fail;
	}

	buf = fd_bo_map(rsc->bo);
	if (!buf) {
		fd_resource_transfer_unmap(pctx, ptrans);
		return NULL;
	}

	*pptrans = ptrans;

	/* layer-first layouts stride by layer_size; otherwise by slice size: */
	if (rsc->layer_first) {
		offset = slice->offset +
			box->y / util_format_get_blockheight(format) * ptrans->stride +
			box->x / util_format_get_blockwidth(format) * rsc->cpp +
			box->z * rsc->layer_size;
	} else {
		offset = slice->offset +
			box->y / util_format_get_blockheight(format) * ptrans->stride +
			box->x / util_format_get_blockwidth(format) * rsc->cpp +
			box->z * slice->size0;
	}

	return buf + offset;

fail:
	fd_resource_transfer_unmap(pctx, ptrans);
	return NULL;
}
int main(int argc, char *argv[]) { struct fd_device *dev; struct fd_pipe *pipe; struct fd_ringbuffer *ring; struct fd_bo *bo; uint32_t i = 0; uint32_t *ptr; int fd, ret; fd = drmOpen("msm", NULL); if (fd < 0) { printf("failed to initialize DRM\n"); return fd; } dev = fd_device_new(fd); if (!dev) { printf("failed to initialize freedreno device\n"); return -1; } pipe = fd_pipe_new(dev, FD_PIPE_3D); if (!pipe) { printf("failed to initialize freedreno pipe\n"); return -1; } ring = fd_ringbuffer_new(pipe, 4096); if (!ring) { printf("failed to initialize freedreno ring\n"); return -1; } bo = fd_bo_new(dev, 4096, 0); #define BASE REG_A3XX_GRAS_CL_VPORT_XOFFSET #define SIZE 6 OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG4, 1); OUT_RING(ring, 0x123); OUT_PKT0(ring, BASE, SIZE); for (i = 0; i < SIZE; i++) OUT_RING(ring, i); /* this adds the value of CP_SCRATCH_REG4 to 0x111 and writes * to REG_BASE+2 */ OUT_PKT3(ring, CP_SET_CONSTANT, 3); OUT_RING(ring, 0x80000000 | CP_REG(BASE + 2)); OUT_RING(ring, REG_AXXX_CP_SCRATCH_REG4); OUT_RING(ring, 0x111); /* read back all the regs: */ for (i = 0; i < SIZE; i++) { OUT_PKT3(ring, CP_REG_TO_MEM, 2); OUT_RING(ring, BASE + i); OUT_RELOCW(ring, bo, i * 4, 0, 0); } fd_ringbuffer_flush(ring); /* and read back the values: */ fd_bo_cpu_prep(bo, pipe, DRM_FREEDRENO_PREP_READ); ptr = fd_bo_map(bo); for (i = 0; i < SIZE; i++) { printf("%02x: %08x\n", i, ptr[i]); } fd_bo_cpu_fini(bo); return 0; }