static struct vc4_resource *
vc4_resource_setup(struct pipe_screen *pscreen,
                   const struct pipe_resource *tmpl)
{
   struct vc4_resource *rsc = CALLOC_STRUCT(vc4_resource);
   if (!rsc)
      return NULL;

   struct pipe_resource *prsc = &rsc->base.b;

   *prsc = *tmpl;

   pipe_reference_init(&prsc->reference, 1);
   prsc->screen = pscreen;

   rsc->base.vtbl = &vc4_resource_vtbl;
   rsc->cpp = util_format_get_blocksize(tmpl->format);

   assert(rsc->cpp);

   return rsc;
}
static struct pipe_surface *
noop_create_surface(struct pipe_context *ctx,
                    struct pipe_resource *texture,
                    const struct pipe_surface *surf_tmpl)
{
   struct pipe_surface *surface = CALLOC_STRUCT(pipe_surface);

   if (surface == NULL)
      return NULL;

   pipe_reference_init(&surface->reference, 1);
   pipe_resource_reference(&surface->texture, texture);
   surface->context = ctx;
   surface->format = surf_tmpl->format;
   surface->width = texture->width0;
   surface->height = texture->height0;
   surface->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
   surface->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
   surface->u.tex.level = surf_tmpl->u.tex.level;

   return surface;
}
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
                                         const struct pipe_resource *templ,
                                         unsigned alignment)
{
   struct r600_screen *rscreen = (struct r600_screen*)screen;
   struct r600_resource *rbuffer;

   rbuffer = MALLOC_STRUCT(r600_resource);
   if (!rbuffer)
      return NULL;

   rbuffer->b.b = *templ;
   pipe_reference_init(&rbuffer->b.b.reference, 1);
   rbuffer->b.b.screen = screen;
   rbuffer->b.vtbl = &r600_buffer_vtbl;
   util_range_init(&rbuffer->valid_buffer_range);

   if (!r600_init_resource(&rscreen->b, rbuffer, templ->width0,
                           alignment, TRUE, templ->usage)) {
      FREE(rbuffer);
      return NULL;
   }
   return &rbuffer->b.b;
}
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
                                         const struct pipe_resource *templ)
{
   struct r600_screen *rscreen = (struct r600_screen*)screen;
   struct r600_resource *rbuffer;
   /* XXX We probably want a different alignment for buffers and textures. */
   unsigned alignment = 4096;

   rbuffer = MALLOC_STRUCT(r600_resource);
   if (!rbuffer)
      return NULL;

   rbuffer->b.b = *templ;
   pipe_reference_init(&rbuffer->b.b.reference, 1);
   rbuffer->b.b.screen = screen;
   rbuffer->b.vtbl = &r600_buffer_vtbl;

   if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment,
                           templ->bind, templ->usage)) {
      FREE(rbuffer);
      return NULL;
   }
   return &rbuffer->b.b;
}
struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
                                         const struct pipe_resource *templ)
{
   struct r300_screen *r300screen = r300_screen(screen);
   struct r300_resource *rbuf;

   rbuf = MALLOC_STRUCT(r300_resource);
   if (!rbuf)
      return NULL;

   rbuf->b.b = *templ;
   rbuf->b.vtbl = &r300_buffer_vtbl;
   pipe_reference_init(&rbuf->b.b.reference, 1);
   rbuf->b.b.screen = screen;
   rbuf->domain = RADEON_DOMAIN_GTT;
   rbuf->buf = NULL;
   rbuf->malloced_buffer = NULL;

   /* Allocate constant buffers and SWTCL vertex and index buffers in RAM.
    * Note that uploaded index buffers use the flag PIPE_BIND_CUSTOM, so that
    * we can distinguish them from user-created buffers.
    */
   if (templ->bind & PIPE_BIND_CONSTANT_BUFFER ||
       (!r300screen->caps.has_tcl && !(templ->bind & PIPE_BIND_CUSTOM))) {
      rbuf->malloced_buffer = align_malloc(templ->width0, 64);
      if (!rbuf->malloced_buffer) {
         FREE(rbuf);
         return NULL;
      }
      return &rbuf->b.b;
   }

   rbuf->buf = r300screen->rws->buffer_create(r300screen->rws,
                                              rbuf->b.b.width0,
                                              R300_BUFFER_ALIGNMENT, TRUE,
                                              rbuf->domain, 0);
   if (!rbuf->buf) {
      FREE(rbuf);
      return NULL;
   }
   rbuf->cs_buf = r300screen->rws->buffer_get_cs_handle(rbuf->buf);
   return &rbuf->b.b;
}
/**
 * Create a texture from a winsys_handle. The handle is often created in
 * another process by first creating a pipe texture and then calling
 * resource_get_handle.
 */
static struct pipe_resource *
fd_resource_from_handle(struct pipe_screen *pscreen,
                        const struct pipe_resource *tmpl,
                        struct winsys_handle *handle)
{
   struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
   struct fd_resource_slice *slice;
   struct pipe_resource *prsc;

   DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
       "nr_samples=%u, usage=%u, bind=%x, flags=%x",
       tmpl->target, util_format_name(tmpl->format),
       tmpl->width0, tmpl->height0, tmpl->depth0,
       tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
       tmpl->usage, tmpl->bind, tmpl->flags);

   if (!rsc)
      return NULL;

   /* Only take pointers into the allocation once we know it succeeded. */
   slice = &rsc->slices[0];
   prsc = &rsc->base.b;

   *prsc = *tmpl;

   pipe_reference_init(&prsc->reference, 1);
   prsc->screen = pscreen;

   rsc->bo = fd_screen_bo_from_handle(pscreen, handle, &slice->pitch);
   if (!rsc->bo)
      goto fail;

   rsc->base.vtbl = &fd_resource_vtbl;
   rsc->cpp = util_format_get_blocksize(tmpl->format);
   slice->pitch /= rsc->cpp;

   assert(rsc->cpp);

   return prsc;

fail:
   fd_resource_destroy(pscreen, prsc);
   return NULL;
}
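/*
 * Hedged usage sketch (not part of the driver above): how an import path
 * like fd_resource_from_handle() pairs with the export side.  Process A
 * exports a handle for a texture, process B adopts it.  The transport of
 * the handle between processes is assumed to happen out of band; only
 * resource_get_handle()/resource_from_handle() are real pipe_screen hooks.
 */
static struct pipe_resource *
share_texture_example(struct pipe_screen *screen,
                      struct pipe_resource *tex,
                      const struct pipe_resource *tmpl)
{
   struct winsys_handle handle;

   memset(&handle, 0, sizeof(handle));
   handle.type = DRM_API_HANDLE_TYPE_SHARED;

   /* Process A: ask the driver for a shareable name for the texture. */
   if (!screen->resource_get_handle(screen, tex, &handle))
      return NULL;

   /* ...the handle travels to process B out of band (socket, pipe, etc.)... */

   /* Process B: wrap the shared BO in a new pipe_resource, using a
    * template describing the same size/format as the original.
    */
   return screen->resource_from_handle(screen, tmpl, &handle);
}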
struct pipe_resource *r600_compute_global_buffer_create(
   struct pipe_screen *screen,
   const struct pipe_resource *templ)
{
   struct r600_resource_global* result = NULL;
   struct r600_screen* rscreen = NULL;
   int size_in_dw = 0;

   assert(templ->target == PIPE_BUFFER);
   assert(templ->bind & PIPE_BIND_GLOBAL);
   assert(templ->array_size == 1 || templ->array_size == 0);
   assert(templ->depth0 == 1 || templ->depth0 == 0);
   assert(templ->height0 == 1 || templ->height0 == 0);

   result = (struct r600_resource_global*)
            CALLOC(sizeof(struct r600_resource_global), 1);
   if (!result)
      return NULL;

   rscreen = (struct r600_screen*)screen;

   COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
   COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
               templ->array_size);

   /* Copy the template before pointing ->screen at us, so the struct
    * copy can't clobber the assignment.
    */
   result->base.b.b = *templ;
   result->base.b.b.screen = screen;
   result->base.b.vtbl = &r600_global_buffer_vtbl;
   pipe_reference_init(&result->base.b.b.reference, 1);

   size_in_dw = (templ->width0+3) / 4;

   result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);

   if (result->chunk == NULL) {
      free(result);
      return NULL;
   }

   return &result->base.b.b;
}
/**
 * Create a new fence object.
 *
 * The rank will be the number of bins in the scene.  Whenever a rendering
 * thread hits a fence command, it'll increment the fence counter.  When
 * the counter == the rank, the fence is finished.
 *
 * \param rank  the expected finished value of the fence counter.
 */
struct lp_fence *
lp_fence_create(unsigned rank)
{
   static int fence_id;
   struct lp_fence *fence = CALLOC_STRUCT(lp_fence);

   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);

   pipe_mutex_init(fence->mutex);
   pipe_condvar_init(fence->signalled);

   fence->id = fence_id++;
   fence->rank = rank;

   if (LP_DEBUG & DEBUG_FENCE)
      debug_printf("%s %d\n", __FUNCTION__, fence->id);

   return fence;
}
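/*
 * For context, a minimal sketch of the signalling side implied by the
 * rank/counter scheme documented above (hedged: the real lp_fence_signal()
 * lives elsewhere in lp_fence.c and may differ in detail).  Each rendering
 * thread calls this once per bin; waiters are woken when the counter
 * reaches the rank.
 */
static void
lp_fence_signal_sketch(struct lp_fence *fence)
{
   pipe_mutex_lock(fence->mutex);

   fence->count++;
   assert(fence->count <= fence->rank);

   /* Wake any thread blocked waiting for completion once all bins are in. */
   if (fence->count == fence->rank)
      pipe_condvar_broadcast(fence->signalled);

   pipe_mutex_unlock(fence->mutex);
}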
/**
 * Create a new texture object, using the given template info.
 */
static struct pipe_resource *
fd_resource_create(struct pipe_screen *pscreen,
                   const struct pipe_resource *tmpl)
{
   struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
   struct pipe_resource *prsc;
   uint32_t size;

   DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
       "nr_samples=%u, usage=%u, bind=%x, flags=%x",
       tmpl->target, util_format_name(tmpl->format),
       tmpl->width0, tmpl->height0, tmpl->depth0,
       tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
       tmpl->usage, tmpl->bind, tmpl->flags);

   if (!rsc)
      return NULL;

   prsc = &rsc->base.b;

   *prsc = *tmpl;

   pipe_reference_init(&prsc->reference, 1);
   prsc->screen = pscreen;

   rsc->base.vtbl = &fd_resource_vtbl;
   rsc->cpp = util_format_get_blocksize(tmpl->format);

   assert(rsc->cpp);

   size = setup_slices(rsc);

   realloc_bo(rsc, size);
   if (!rsc->bo)
      goto fail;

   return prsc;

fail:
   fd_resource_destroy(pscreen, prsc);
   return NULL;
}
struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
                                         const struct pipe_resource *templ)
{
   struct r300_screen *r300screen = r300_screen(screen);
   struct r300_resource *rbuf;
   unsigned alignment = 16;

   rbuf = MALLOC_STRUCT(r300_resource);
   if (!rbuf)
      return NULL;

   rbuf->b.b = *templ;
   rbuf->b.vtbl = &r300_buffer_vtbl;
   pipe_reference_init(&rbuf->b.b.reference, 1);
   rbuf->b.b.screen = screen;
   rbuf->domain = RADEON_DOMAIN_GTT;
   rbuf->buf = NULL;
   rbuf->malloced_buffer = NULL;

   /* Alloc constant buffers and SWTCL buffers in RAM. */
   if (templ->bind & PIPE_BIND_CONSTANT_BUFFER ||
       (!r300screen->caps.has_tcl &&
        (templ->bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)))) {
      rbuf->malloced_buffer = MALLOC(templ->width0);
      return &rbuf->b.b;
   }

   rbuf->buf = r300screen->rws->buffer_create(r300screen->rws,
                                              rbuf->b.b.width0,
                                              alignment, rbuf->b.b.bind,
                                              rbuf->domain);
   if (!rbuf->buf) {
      FREE(rbuf);
      return NULL;
   }
   rbuf->cs_buf = r300screen->rws->buffer_get_cs_handle(rbuf->buf);
   return &rbuf->b.b;
}
struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe,
                                                struct pipe_resource *texture,
                                                const struct pipe_surface *templ,
                                                unsigned width, unsigned height)
{
   struct r600_surface *surface = CALLOC_STRUCT(r600_surface);

   if (surface == NULL)
      return NULL;

   assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level));
   assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level));

   pipe_reference_init(&surface->base.reference, 1);
   pipe_resource_reference(&surface->base.texture, texture);
   surface->base.context = pipe;
   surface->base.format = templ->format;
   surface->base.width = width;
   surface->base.height = height;
   surface->base.u = templ->u;
   return &surface->base;
}
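/*
 * Hedged sketch of how a custom-size entry point like the one above is
 * typically wrapped: the generic create_surface hook derives width/height
 * from the texture and the requested mip level.  The actual upstream
 * wrapper may differ in detail.
 */
static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
                                                struct pipe_resource *tex,
                                                const struct pipe_surface *templ)
{
   unsigned level = templ->u.tex.level;

   return r600_create_surface_custom(pipe, tex, templ,
                                     u_minify(tex->width0, level),
                                     u_minify(tex->height0, level));
}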
static struct pipe_resource *noop_resource_create(struct pipe_screen *screen,
                                                  const struct pipe_resource *templ)
{
   struct noop_resource *nresource;
   unsigned stride;

   nresource = CALLOC_STRUCT(noop_resource);
   if (nresource == NULL)
      return NULL;

   stride = util_format_get_stride(templ->format, templ->width0);
   nresource->base = *templ;
   nresource->base.screen = screen;
   nresource->size = stride * templ->height0 * templ->depth0;
   nresource->data = MALLOC(nresource->size);
   pipe_reference_init(&nresource->base.reference, 1);
   if (nresource->data == NULL) {
      FREE(nresource);
      return NULL;
   }
   return &nresource->base;
}
struct pipe_resource *r300_buffer_create(struct pipe_screen *screen,
                                         const struct pipe_resource *templ)
{
   struct r300_screen *r300screen = r300_screen(screen);
   struct r300_resource *rbuf;
   unsigned alignment = 16;

   rbuf = util_slab_alloc(&r300screen->pool_buffers);
   if (!rbuf)
      return NULL;

   rbuf->b.b = *templ;
   rbuf->b.vtbl = &r300_buffer_vtbl;
   pipe_reference_init(&rbuf->b.b.reference, 1);
   rbuf->b.b.screen = screen;
   rbuf->b.b.user_ptr = NULL;
   rbuf->domain = RADEON_DOMAIN_GTT;
   rbuf->buf = NULL;
   rbuf->constant_buffer = NULL;

   /* Alloc constant buffers in RAM. */
   if (templ->bind & PIPE_BIND_CONSTANT_BUFFER) {
      rbuf->constant_buffer = MALLOC(templ->width0);
      return &rbuf->b.b;
   }

   rbuf->buf = r300screen->rws->buffer_create(r300screen->rws,
                                              rbuf->b.b.width0,
                                              alignment, rbuf->b.b.bind,
                                              rbuf->domain);
   if (!rbuf->buf) {
      util_slab_free(&r300screen->pool_buffers, rbuf);
      return NULL;
   }
   rbuf->cs_buf = r300screen->rws->buffer_get_cs_handle(rbuf->buf);
   return &rbuf->b.b;
}
static struct pipe_surface *
swr_create_surface(struct pipe_context *pipe,
                   struct pipe_resource *pt,
                   const struct pipe_surface *surf_tmpl)
{
   struct pipe_surface *ps;

   ps = CALLOC_STRUCT(pipe_surface);
   if (ps) {
      pipe_reference_init(&ps->reference, 1);
      pipe_resource_reference(&ps->texture, pt);
      ps->context = pipe;
      ps->format = surf_tmpl->format;
      if (pt->target != PIPE_BUFFER) {
         assert(surf_tmpl->u.tex.level <= pt->last_level);
         ps->width = u_minify(pt->width0, surf_tmpl->u.tex.level);
         ps->height = u_minify(pt->height0, surf_tmpl->u.tex.level);
         ps->u.tex.level = surf_tmpl->u.tex.level;
         ps->u.tex.first_layer = surf_tmpl->u.tex.first_layer;
         ps->u.tex.last_layer = surf_tmpl->u.tex.last_layer;
         if (ps->u.tex.first_layer != ps->u.tex.last_layer) {
            debug_printf("creating surface with multiple layers, rendering "
                         "to first layer only\n");
         }
      } else {
         /* setting width as number of elements should get us correct
          * renderbuffer width */
         ps->width = surf_tmpl->u.buf.last_element -
                     surf_tmpl->u.buf.first_element + 1;
         ps->height = pt->height0;
         ps->u.buf.first_element = surf_tmpl->u.buf.first_element;
         ps->u.buf.last_element = surf_tmpl->u.buf.last_element;
         assert(ps->u.buf.first_element <= ps->u.buf.last_element);
         assert(ps->u.buf.last_element < ps->width);
      }
   }
   return ps;
}
struct pipe_surface *
nv30_miptree_surface_new(struct pipe_context *pipe,
                         struct pipe_resource *pt,
                         const struct pipe_surface *tmpl)
{
   struct nv30_miptree *mt = nv30_miptree(pt); /* guaranteed */
   struct nv30_surface *ns;
   struct pipe_surface *ps;
   struct nv30_miptree_level *lvl = &mt->level[tmpl->u.tex.level];

   ns = CALLOC_STRUCT(nv30_surface);
   if (!ns)
      return NULL;
   ps = &ns->base;

   pipe_reference_init(&ps->reference, 1);
   pipe_resource_reference(&ps->texture, pt);
   ps->context = pipe;
   ps->format = tmpl->format;
   ps->usage = tmpl->usage;
   ps->u.tex.level = tmpl->u.tex.level;
   ps->u.tex.first_layer = tmpl->u.tex.first_layer;
   ps->u.tex.last_layer = tmpl->u.tex.last_layer;

   ns->width = u_minify(pt->width0, ps->u.tex.level);
   ns->height = u_minify(pt->height0, ps->u.tex.level);
   ns->depth = ps->u.tex.last_layer - ps->u.tex.first_layer + 1;
   ns->offset = layer_offset(pt, ps->u.tex.level, ps->u.tex.first_layer);
   if (mt->swizzled)
      ns->pitch = 4096; /* random, just something the hw won't reject.. */
   else
      ns->pitch = lvl->pitch;

   /* These fields are supposedly going away, but the state tracker
    * still uses them.
    */
   ps->width = ns->width;
   ps->height = ns->height;
   return ps;
}
/**
 * Create a new texture object, using the given template info.
 */
static struct pipe_resource *
fd_resource_create(struct pipe_screen *pscreen,
                   const struct pipe_resource *tmpl)
{
   struct fd_screen *screen = fd_screen(pscreen);
   struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
   struct pipe_resource *prsc;
   uint32_t flags, size;

   DBG("target=%d, format=%s, %ux%u@%u, array_size=%u, last_level=%u, "
       "nr_samples=%u, usage=%u, bind=%x, flags=%x",
       tmpl->target, util_format_name(tmpl->format),
       tmpl->width0, tmpl->height0, tmpl->depth0,
       tmpl->array_size, tmpl->last_level, tmpl->nr_samples,
       tmpl->usage, tmpl->bind, tmpl->flags);

   if (!rsc)
      return NULL;

   prsc = &rsc->base.b;

   *prsc = *tmpl;

   pipe_reference_init(&prsc->reference, 1);
   prsc->screen = pscreen;

   rsc->base.vtbl = &fd_resource_vtbl;
   rsc->pitch = align(tmpl->width0, 32);
   rsc->cpp = util_format_get_blocksize(tmpl->format);

   assert(rsc->cpp);

   size = rsc->pitch * tmpl->height0 * rsc->cpp;
   flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
           DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */

   rsc->bo = fd_bo_new(screen->dev, size, flags);
   if (!rsc->bo) {
      FREE(rsc);
      return NULL;
   }

   return prsc;
}
static struct pipe_buffer *
xm_buffer_create(struct pipe_winsys *pws,
                 unsigned alignment,
                 unsigned usage,
                 unsigned size)
{
   struct xm_buffer *buffer = CALLOC_STRUCT(xm_buffer);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.alignment = alignment;
   buffer->base.usage = usage;
   buffer->base.size = size;

   if (buffer->data == NULL) {
      buffer->shm = 0;

      /* align to 16-byte multiple for Cell */
      buffer->data = align_malloc(size, max(alignment, 16));
   }

   return &buffer->base;
}
struct v3d_fence *
v3d_fence_create(struct v3d_context *v3d)
{
   struct v3d_fence *f = calloc(1, sizeof(*f));
   if (!f)
      return NULL;

   /* Snapshot the last V3D rendering's out fence.  We'd rather have
    * another syncobj instead of a sync file, but this is all we get.
    * (HandleToFD/FDToHandle just gives you another syncobj ID for the
    * same syncobj).
    *
    * calloc left f->fd at 0, so pre-set it to -1 in case the export
    * fails without writing the fd.
    */
   f->fd = -1;
   drmSyncobjExportSyncFile(v3d->fd, v3d->out_sync, &f->fd);
   if (f->fd == -1) {
      fprintf(stderr, "export failed\n");
      free(f);
      return NULL;
   }

   pipe_reference_init(&f->reference, 1);

   return f;
}
static struct pb_buffer *
vmw_gmr_bufmgr_create_buffer(struct pb_manager *_mgr,
                             pb_size size,
                             const struct pb_desc *desc)
{
   struct vmw_gmr_bufmgr *mgr = vmw_gmr_bufmgr(_mgr);
   struct vmw_winsys_screen *vws = mgr->vws;
   struct vmw_gmr_buffer *buf;

   buf = CALLOC_STRUCT(vmw_gmr_buffer);
   if (!buf)
      goto error1;

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;
   buf->base.base.size = size;
   buf->base.vtbl = &vmw_gmr_buffer_vtbl;
   buf->mgr = mgr;

   buf->region = vmw_ioctl_region_create(vws, size);
   if (!buf->region)
      goto error2;

   buf->map = vmw_ioctl_region_map(buf->region);
   if (!buf->map)
      goto error3;

   return &buf->base;

error3:
   vmw_ioctl_region_destroy(buf->region);
error2:
   FREE(buf);
error1:
   return NULL;
}
struct pipe_resource *
nv30_miptree_from_handle(struct pipe_screen *pscreen,
                         const struct pipe_resource *tmpl,
                         struct winsys_handle *handle)
{
   struct nv30_miptree *mt;
   unsigned stride;

   /* only supports 2D, non-mipmapped textures for the moment */
   if ((tmpl->target != PIPE_TEXTURE_2D &&
        tmpl->target != PIPE_TEXTURE_RECT) ||
       tmpl->last_level != 0 ||
       tmpl->depth0 != 1 ||
       tmpl->array_size > 1)
      return NULL;

   mt = CALLOC_STRUCT(nv30_miptree);
   if (!mt)
      return NULL;

   mt->base.bo = nouveau_screen_bo_from_handle(pscreen, handle, &stride);
   if (mt->base.bo == NULL) {
      FREE(mt);
      return NULL;
   }

   mt->base.base = *tmpl;
   mt->base.vtbl = &nv30_miptree_vtbl;
   pipe_reference_init(&mt->base.base.reference, 1);
   mt->base.base.screen = pscreen;
   mt->uniform_pitch = stride;
   mt->level[0].pitch = mt->uniform_pitch;
   mt->level[0].offset = 0;

   /* no need to adjust bo reference count */
   return &mt->base.base;
}
/**
 * Create new pipe_resource given the template information.
 */
static struct pipe_resource *
softpipe_resource_create_front(struct pipe_screen *screen,
                               const struct pipe_resource *templat,
                               const void *map_front_private)
{
   struct softpipe_resource *spr = CALLOC_STRUCT(softpipe_resource);
   if (!spr)
      return NULL;

   assert(templat->format != PIPE_FORMAT_NONE);

   spr->base = *templat;
   pipe_reference_init(&spr->base.reference, 1);
   spr->base.screen = screen;

   spr->pot = (util_is_power_of_two(templat->width0) &&
               util_is_power_of_two(templat->height0) &&
               util_is_power_of_two(templat->depth0));

   if (spr->base.bind & (PIPE_BIND_DISPLAY_TARGET |
                         PIPE_BIND_SCANOUT |
                         PIPE_BIND_SHARED)) {
      if (!softpipe_displaytarget_layout(screen, spr, map_front_private))
         goto fail;
   }
   else {
      if (!softpipe_resource_layout(screen, spr, TRUE))
         goto fail;
   }

   return &spr->base;

fail:
   FREE(spr);
   return NULL;
}
static void *
vc5_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
                        const struct pipe_vertex_element *elements)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   struct vc5_vertex_stateobj *so = CALLOC_STRUCT(vc5_vertex_stateobj);

   if (!so)
      return NULL;

   memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
   so->num_elements = num_elements;

   for (int i = 0; i < so->num_elements; i++) {
      const struct pipe_vertex_element *elem = &elements[i];
      const struct util_format_description *desc =
         util_format_description(elem->src_format);
      uint32_t r_size = desc->channel[0].size;

      struct V3D33_GL_SHADER_STATE_ATTRIBUTE_RECORD attr_unpacked = {
         /* vec_size == 0 means 4 */
         .vec_size = desc->nr_channels & 3,
         .signed_int_type = (desc->channel[0].type ==
                             UTIL_FORMAT_TYPE_SIGNED),
         .normalized_int_type = desc->channel[0].normalized,
         .read_as_int_uint = desc->channel[0].pure_integer,
         .instance_divisor = elem->instance_divisor,
      };

      switch (desc->channel[0].type) {
      case UTIL_FORMAT_TYPE_FLOAT:
         if (r_size == 32) {
            attr_unpacked.type = ATTRIBUTE_FLOAT;
         } else {
            assert(r_size == 16);
            attr_unpacked.type = ATTRIBUTE_HALF_FLOAT;
         }
         break;

      case UTIL_FORMAT_TYPE_SIGNED:
      case UTIL_FORMAT_TYPE_UNSIGNED:
         switch (r_size) {
         case 32:
            attr_unpacked.type = ATTRIBUTE_INT;
            break;
         case 16:
            attr_unpacked.type = ATTRIBUTE_SHORT;
            break;
         case 10:
            attr_unpacked.type = ATTRIBUTE_INT2_10_10_10;
            break;
         case 8:
            attr_unpacked.type = ATTRIBUTE_BYTE;
            break;
         default:
            fprintf(stderr, "format %s unsupported\n", desc->name);
            attr_unpacked.type = ATTRIBUTE_BYTE;
            abort();
         }
         break;

      default:
         fprintf(stderr, "format %s unsupported\n", desc->name);
         abort();
      }

      const uint32_t size =
         cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);
      V3D33_GL_SHADER_STATE_ATTRIBUTE_RECORD_pack(NULL,
                                                  (uint8_t *)&so->attrs[i * size],
                                                  &attr_unpacked);
   }

   /* Set up the default attribute values in case any of the vertex
    * elements use them.
    */
   so->default_attribute_values =
      vc5_bo_alloc(vc5->screen,
                   VC5_MAX_ATTRIBUTES * 4 * sizeof(float),
                   "default attributes");
   uint32_t *attrs = vc5_bo_map(so->default_attribute_values);
   for (int i = 0; i < VC5_MAX_ATTRIBUTES; i++) {
      attrs[i * 4 + 0] = 0;
      attrs[i * 4 + 1] = 0;
      attrs[i * 4 + 2] = 0;
      if (i < so->num_elements &&
          util_format_is_pure_integer(so->pipe[i].src_format)) {
         attrs[i * 4 + 3] = 1;
      } else {
         attrs[i * 4 + 3] = fui(1.0);
      }
   }

   return so;
}

static void
vc5_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   vc5->vtx = hwcso;
   vc5->dirty |= VC5_DIRTY_VTXSTATE;
}

static void
vc5_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
                        const struct pipe_constant_buffer *cb)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   struct vc5_constbuf_stateobj *so = &vc5->constbuf[shader];

   util_copy_constant_buffer(&so->cb[index], cb);

   /* Note that the state tracker can unbind constant buffers by
    * passing NULL here.
    */
   if (unlikely(!cb)) {
      so->enabled_mask &= ~(1 << index);
      so->dirty_mask &= ~(1 << index);
      return;
   }

   so->enabled_mask |= 1 << index;
   so->dirty_mask |= 1 << index;
   vc5->dirty |= VC5_DIRTY_CONSTBUF;
}

static void
vc5_set_framebuffer_state(struct pipe_context *pctx,
                          const struct pipe_framebuffer_state *framebuffer)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   struct pipe_framebuffer_state *cso = &vc5->framebuffer;
   unsigned i;

   vc5->job = NULL;

   for (i = 0; i < framebuffer->nr_cbufs; i++)
      pipe_surface_reference(&cso->cbufs[i], framebuffer->cbufs[i]);
   for (; i < vc5->framebuffer.nr_cbufs; i++)
      pipe_surface_reference(&cso->cbufs[i], NULL);

   cso->nr_cbufs = framebuffer->nr_cbufs;

   pipe_surface_reference(&cso->zsbuf, framebuffer->zsbuf);

   cso->width = framebuffer->width;
   cso->height = framebuffer->height;

   vc5->dirty |= VC5_DIRTY_FRAMEBUFFER;
}

static struct vc5_texture_stateobj *
vc5_get_stage_tex(struct vc5_context *vc5, enum pipe_shader_type shader)
{
   switch (shader) {
   case PIPE_SHADER_FRAGMENT:
      vc5->dirty |= VC5_DIRTY_FRAGTEX;
      return &vc5->fragtex;
   case PIPE_SHADER_VERTEX:
      vc5->dirty |= VC5_DIRTY_VERTTEX;
      return &vc5->verttex;
   default:
      fprintf(stderr, "Unknown shader target %d\n", shader);
      abort();
   }
}

static uint32_t
translate_wrap(uint32_t pipe_wrap, bool using_nearest)
{
   switch (pipe_wrap) {
   case PIPE_TEX_WRAP_REPEAT:
      return 0;
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
      return 1;
   case PIPE_TEX_WRAP_MIRROR_REPEAT:
      return 2;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
      return 3;
   case PIPE_TEX_WRAP_CLAMP:
      return (using_nearest ? 1 : 3);
   default:
      unreachable("Unknown wrap mode");
   }
}

static void *
vc5_create_sampler_state(struct pipe_context *pctx,
                         const struct pipe_sampler_state *cso)
{
   struct vc5_sampler_state *so = CALLOC_STRUCT(vc5_sampler_state);

   if (!so)
      return NULL;

   memcpy(so, cso, sizeof(*cso));

   bool either_nearest =
      (cso->mag_img_filter == PIPE_TEX_MIPFILTER_NEAREST ||
       cso->min_img_filter == PIPE_TEX_MIPFILTER_NEAREST);

   struct V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1 p0_unpacked = {
      .s_wrap_mode = translate_wrap(cso->wrap_s, either_nearest),
      .t_wrap_mode = translate_wrap(cso->wrap_t, either_nearest),
      .r_wrap_mode = translate_wrap(cso->wrap_r, either_nearest),
   };
   V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1_pack(NULL,
                                                    (uint8_t *)&so->p0,
                                                    &p0_unpacked);

   struct V3D33_TEXTURE_SHADER_STATE state_unpacked = {
      cl_packet_header(TEXTURE_SHADER_STATE),

      .min_level_of_detail = MAX2(cso->min_lod, 0.0),
      .depth_compare_function = cso->compare_func,
      .fixed_bias = cso->lod_bias,
   };
   STATIC_ASSERT(ARRAY_SIZE(so->texture_shader_state) ==
                 cl_packet_length(TEXTURE_SHADER_STATE));
   cl_packet_pack(TEXTURE_SHADER_STATE)(NULL, so->texture_shader_state,
                                        &state_unpacked);

   return so;
}

static void
vc5_sampler_states_bind(struct pipe_context *pctx,
                        enum pipe_shader_type shader, unsigned start,
                        unsigned nr, void **hwcso)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   struct vc5_texture_stateobj *stage_tex = vc5_get_stage_tex(vc5, shader);

   assert(start == 0);
   unsigned i;
   unsigned new_nr = 0;

   for (i = 0; i < nr; i++) {
      if (hwcso[i])
         new_nr = i + 1;
      stage_tex->samplers[i] = hwcso[i];
   }

   for (; i < stage_tex->num_samplers; i++) {
      stage_tex->samplers[i] = NULL;
   }

   stage_tex->num_samplers = new_nr;
}

static uint32_t
translate_swizzle(unsigned char pipe_swizzle)
{
   switch (pipe_swizzle) {
   case PIPE_SWIZZLE_0:
      return 0;
   case PIPE_SWIZZLE_1:
      return 1;
   case PIPE_SWIZZLE_X:
   case PIPE_SWIZZLE_Y:
   case PIPE_SWIZZLE_Z:
   case PIPE_SWIZZLE_W:
      return 2 + pipe_swizzle;
   default:
      unreachable("unknown swizzle");
   }
}

static struct pipe_sampler_view *
vc5_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
   struct vc5_sampler_view *so = CALLOC_STRUCT(vc5_sampler_view);
   struct vc5_resource *rsc = vc5_resource(prsc);

   if (!so)
      return NULL;

   so->base = *cso;

   pipe_reference(NULL, &prsc->reference);

   struct V3D33_TEXTURE_UNIFORM_PARAMETER_1_CFG_MODE1 unpacked = {
   };

   unpacked.return_word_0_of_texture_data = true;
   if (vc5_get_tex_return_size(cso->format) == 16) {
      unpacked.return_word_1_of_texture_data = true;
   } else {
      int chans = vc5_get_tex_return_channels(cso->format);

      if (chans > 1)
         unpacked.return_word_1_of_texture_data = true;
      if (chans > 2)
         unpacked.return_word_2_of_texture_data = true;
      if (chans > 3)
         unpacked.return_word_3_of_texture_data = true;
   }

   V3D33_TEXTURE_UNIFORM_PARAMETER_1_CFG_MODE1_pack(NULL,
                                                    (uint8_t *)&so->p1,
                                                    &unpacked);

   /* Compute the sampler view's swizzle up front. This will be plugged
    * into either the sampler (for 16-bit returns) or the shader's
    * texture key (for 32)
    */
   uint8_t view_swizzle[4] = {
      cso->swizzle_r,
      cso->swizzle_g,
      cso->swizzle_b,
      cso->swizzle_a
   };
   const uint8_t *fmt_swizzle = vc5_get_format_swizzle(so->base.format);
   util_format_compose_swizzles(fmt_swizzle, view_swizzle, so->swizzle);

   so->base.texture = prsc;
   so->base.reference.count = 1;
   so->base.context = pctx;

   struct V3D33_TEXTURE_SHADER_STATE state_unpacked = {
      cl_packet_header(TEXTURE_SHADER_STATE),

      .image_width = prsc->width0,
      .image_height = prsc->height0,
      .image_depth = prsc->depth0,

      .texture_type = rsc->tex_format,
      .srgb = util_format_is_srgb(cso->format),

      .base_level = cso->u.tex.first_level,
      .array_stride_64_byte_aligned = rsc->cube_map_stride / 64,
   };

   /* Note: Contrary to the docs, the swizzle still applies even
    * if the return size is 32.  It's just that you probably want
    * to swizzle in the shader, because you need the Y/Z/W
    * channels to be defined.
    */
   if (vc5_get_tex_return_size(cso->format) != 32) {
      state_unpacked.swizzle_r = translate_swizzle(so->swizzle[0]);
      state_unpacked.swizzle_g = translate_swizzle(so->swizzle[1]);
      state_unpacked.swizzle_b = translate_swizzle(so->swizzle[2]);
      state_unpacked.swizzle_a = translate_swizzle(so->swizzle[3]);
   } else {
      state_unpacked.swizzle_r = translate_swizzle(PIPE_SWIZZLE_X);
      state_unpacked.swizzle_g = translate_swizzle(PIPE_SWIZZLE_Y);
      state_unpacked.swizzle_b = translate_swizzle(PIPE_SWIZZLE_Z);
      state_unpacked.swizzle_a = translate_swizzle(PIPE_SWIZZLE_W);
   }

   /* XXX: While we need to use this flag to enable tiled
    * resource sharing (even a small shared buffer should be UIF,
    * not UBLINEAR or raster), this is also at the moment
    * patching up the fact that our resource layout's decisions
    * about XOR don't quite match the HW's.
    */
   switch (rsc->slices[0].tiling) {
   case VC5_TILING_UIF_NO_XOR:
   case VC5_TILING_UIF_XOR:
      state_unpacked.level_0_is_strictly_uif = true;
      state_unpacked.level_0_xor_enable = false;
      break;
   default:
      break;
   }

   STATIC_ASSERT(ARRAY_SIZE(so->texture_shader_state) ==
                 cl_packet_length(TEXTURE_SHADER_STATE));
   cl_packet_pack(TEXTURE_SHADER_STATE)(NULL, so->texture_shader_state,
                                        &state_unpacked);

   return &so->base;
}

static void
vc5_sampler_view_destroy(struct pipe_context *pctx,
                         struct pipe_sampler_view *view)
{
   pipe_resource_reference(&view->texture, NULL);
   free(view);
}

static void
vc5_set_sampler_views(struct pipe_context *pctx,
                      enum pipe_shader_type shader,
                      unsigned start, unsigned nr,
                      struct pipe_sampler_view **views)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   struct vc5_texture_stateobj *stage_tex = vc5_get_stage_tex(vc5, shader);
   unsigned i;
   unsigned new_nr = 0;

   assert(start == 0);

   for (i = 0; i < nr; i++) {
      if (views[i])
         new_nr = i + 1;
      pipe_sampler_view_reference(&stage_tex->textures[i], views[i]);
   }

   for (; i < stage_tex->num_textures; i++) {
      pipe_sampler_view_reference(&stage_tex->textures[i], NULL);
   }

   stage_tex->num_textures = new_nr;
}

static struct pipe_stream_output_target *
vc5_create_stream_output_target(struct pipe_context *pctx,
                                struct pipe_resource *prsc,
                                unsigned buffer_offset,
                                unsigned buffer_size)
{
   struct pipe_stream_output_target *target;

   target = CALLOC_STRUCT(pipe_stream_output_target);
   if (!target)
      return NULL;

   pipe_reference_init(&target->reference, 1);
   pipe_resource_reference(&target->buffer, prsc);

   target->context = pctx;
   target->buffer_offset = buffer_offset;
   target->buffer_size = buffer_size;

   return target;
}

static void
vc5_stream_output_target_destroy(struct pipe_context *pctx,
                                 struct pipe_stream_output_target *target)
{
   pipe_resource_reference(&target->buffer, NULL);
   free(target);
}
PUBLIC struct radeon_winsys *
radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
   struct radeon_drm_winsys *ws;

   pipe_mutex_lock(fd_tab_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create(hash_fd, compare_fd);
   }

   ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (ws) {
      pipe_reference(NULL, &ws->reference);
      pipe_mutex_unlock(fd_tab_mutex);
      return &ws->base;
   }

   ws = CALLOC_STRUCT(radeon_drm_winsys);
   if (!ws) {
      pipe_mutex_unlock(fd_tab_mutex);
      return NULL;
   }

   ws->fd = dup(fd);

   if (!do_winsys_init(ws))
      goto fail;

   /* Create managers. */
   ws->kman = radeon_bomgr_create(ws);
   if (!ws->kman)
      goto fail;

   ws->cman = pb_cache_manager_create(ws->kman, 500000, 2.0f, 0,
                                      MIN2(ws->info.vram_size,
                                           ws->info.gart_size));
   if (!ws->cman)
      goto fail;

   if (ws->gen >= DRV_R600) {
      ws->surf_man = radeon_surface_manager_new(ws->fd);
      if (!ws->surf_man)
         goto fail;
   }

   /* init reference */
   pipe_reference_init(&ws->reference, 1);

   /* Set functions. */
   ws->base.unref = radeon_winsys_unref;
   ws->base.destroy = radeon_winsys_destroy;
   ws->base.query_info = radeon_query_info;
   ws->base.cs_request_feature = radeon_cs_request_feature;
   ws->base.query_value = radeon_query_value;
   ws->base.read_registers = radeon_read_registers;

   radeon_bomgr_init_functions(ws);
   radeon_drm_cs_init_functions(ws);
   radeon_surface_init_functions(ws);

   pipe_mutex_init(ws->hyperz_owner_mutex);
   pipe_mutex_init(ws->cmask_owner_mutex);
   pipe_mutex_init(ws->cs_stack_lock);

   ws->ncs = 0;
   pipe_semaphore_init(&ws->cs_queued, 0);
   if (ws->num_cpus > 1 && debug_get_option_thread())
      ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   ws->base.screen = screen_create(&ws->base);
   if (!ws->base.screen) {
      radeon_winsys_destroy(&ws->base);
      pipe_mutex_unlock(fd_tab_mutex);
      return NULL;
   }

   util_hash_table_set(fd_tab, intptr_to_pointer(ws->fd), ws);

   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   pipe_mutex_unlock(fd_tab_mutex);

   return &ws->base;

fail:
   pipe_mutex_unlock(fd_tab_mutex);
   if (ws->cman)
      ws->cman->destroy(ws->cman);
   if (ws->kman)
      ws->kman->destroy(ws->kman);
   if (ws->surf_man)
      radeon_surface_manager_free(ws->surf_man);
   if (ws->fd >= 0)
      close(ws->fd);
   FREE(ws);
   return NULL;
}
static struct pb_buffer *
pb_debug_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   struct pb_debug_buffer *buf;
   struct pb_desc real_desc;
   pb_size real_size;

   assert(size);
   assert(desc->alignment);

   buf = CALLOC_STRUCT(pb_debug_buffer);
   if (!buf)
      return NULL;

   real_size = mgr->underflow_size + size + mgr->overflow_size;
   real_desc = *desc;
   real_desc.usage |= PIPE_BUFFER_USAGE_CPU_WRITE;
   real_desc.usage |= PIPE_BUFFER_USAGE_CPU_READ;

   buf->buffer = mgr->provider->create_buffer(mgr->provider,
                                              real_size,
                                              &real_desc);
   if (!buf->buffer) {
      FREE(buf);
#if 0
      pipe_mutex_lock(mgr->mutex);
      debug_printf("%s: failed to create buffer\n", __FUNCTION__);
      if (!LIST_IS_EMPTY(&mgr->list))
         pb_debug_manager_dump(mgr);
      pipe_mutex_unlock(mgr->mutex);
#endif
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->base.reference));
   assert(pb_check_alignment(real_desc.alignment,
                             buf->buffer->base.alignment));
   assert(pb_check_usage(real_desc.usage, buf->buffer->base.usage));
   assert(buf->buffer->base.size >= real_size);

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;
   buf->base.base.size = size;

   buf->base.vtbl = &pb_debug_buffer_vtbl;
   buf->mgr = mgr;

   buf->underflow_size = mgr->underflow_size;
   buf->overflow_size = buf->buffer->base.size - buf->underflow_size - size;

   debug_backtrace_capture(buf->create_backtrace, 1,
                           PB_DEBUG_CREATE_BACKTRACE);

   pb_debug_buffer_fill(buf);

   pipe_mutex_init(buf->mutex);

   pipe_mutex_lock(mgr->mutex);
   LIST_ADDTAIL(&buf->head, &mgr->list);
   pipe_mutex_unlock(mgr->mutex);

   return &buf->base;
}
/**
 * vmw_drm_gb_surface_from_handle - Create a shared surface
 *
 * @sws: Screen to register the surface with.
 * @whandle: struct winsys_handle identifying the kernel surface object
 * @format: On successful return points to a value describing the
 * surface format.
 *
 * Returns a refcounted pointer to a struct svga_winsys_surface
 * embedded in a struct vmw_svga_winsys_surface on success or NULL
 * on failure.
 */
static struct svga_winsys_surface *
vmw_drm_gb_surface_from_handle(struct svga_winsys_screen *sws,
                               struct winsys_handle *whandle,
                               SVGA3dSurfaceFormat *format)
{
   struct vmw_svga_winsys_surface *vsrf;
   struct svga_winsys_surface *ssrf;
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   SVGA3dSurfaceFlags flags;
   uint32_t mip_levels;
   struct vmw_buffer_desc desc;
   struct pb_manager *provider = vws->pools.gmr;
   struct pb_buffer *pb_buf;
   uint32_t handle;
   int ret;

   if (whandle->offset != 0) {
      fprintf(stderr, "Attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   ret = vmw_ioctl_gb_surface_ref(vws, whandle, &flags, format,
                                  &mip_levels, &handle, &desc.region);
   if (ret) {
      fprintf(stderr, "Failed referencing shared surface. SID %d.\n"
              "Error %d (%s).\n",
              whandle->handle, ret, strerror(-ret));
      return NULL;
   }

   if (mip_levels != 1) {
      fprintf(stderr, "Incorrect number of mipmap levels on shared surface."
              " SID %d, levels %d\n",
              whandle->handle, mip_levels);
      goto out_mip;
   }

   vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
   if (!vsrf)
      goto out_mip;

   pipe_reference_init(&vsrf->refcnt, 1);
   p_atomic_set(&vsrf->validated, 0);
   vsrf->screen = vws;
   vsrf->sid = handle;
   vsrf->size = vmw_region_size(desc.region);

   /*
    * Synchronize backing buffers of shared surfaces using the
    * kernel, since we don't pass fence objects around between
    * processes.
    */
   desc.pb_desc.alignment = 4096;
   desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED | VMW_BUFFER_USAGE_SYNC;
   pb_buf = provider->create_buffer(provider, vsrf->size, &desc.pb_desc);
   vsrf->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
   if (!vsrf->buf)
      goto out_no_buf;

   ssrf = svga_winsys_surface(vsrf);

   return ssrf;

out_no_buf:
   FREE(vsrf);
out_mip:
   vmw_ioctl_region_destroy(desc.region);
   vmw_ioctl_surface_destroy(vws, whandle->handle);
   return NULL;
}
static struct svga_winsys_surface *
vmw_drm_surface_from_handle(struct svga_winsys_screen *sws,
                            struct winsys_handle *whandle,
                            SVGA3dSurfaceFormat *format)
{
   struct vmw_svga_winsys_surface *vsrf;
   struct svga_winsys_surface *ssrf;
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   union drm_vmw_surface_reference_arg arg;
   struct drm_vmw_surface_arg *req = &arg.req;
   struct drm_vmw_surface_create_req *rep = &arg.rep;
   uint32_t handle = 0;
   struct drm_vmw_size size;
   SVGA3dSize base_size;
   int ret;
   int i;

   if (whandle->offset != 0) {
      fprintf(stderr, "Attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
   case DRM_API_HANDLE_TYPE_KMS:
      handle = whandle->handle;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle,
                               &handle);
      if (ret) {
         vmw_error("Failed to get handle from prime fd %d.\n",
                   (int) whandle->handle);
         return NULL;
      }
      break;
   default:
      vmw_error("Attempt to import unsupported handle type %d.\n",
                whandle->type);
      return NULL;
   }

   memset(&arg, 0, sizeof(arg));
   req->sid = handle;
   rep->size_addr = (unsigned long)&size;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_REF_SURFACE,
                             &arg, sizeof(arg));

   /*
    * Need to close the handle we got from prime.
    */
   if (whandle->type == DRM_API_HANDLE_TYPE_FD)
      vmw_ioctl_surface_destroy(vws, handle);

   if (ret) {
      /*
       * Any attempt to share something other than a surface, like a dumb
       * kms buffer, should fail here.
       */
      vmw_error("Failed referencing shared surface. SID %d.\n"
                "Error %d (%s).\n",
                handle, ret, strerror(-ret));
      return NULL;
   }

   if (rep->mip_levels[0] != 1) {
      vmw_error("Incorrect number of mipmap levels on shared surface."
                " SID %d, levels %d\n",
                handle, rep->mip_levels[0]);
      goto out_mip;
   }

   for (i = 1; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
      if (rep->mip_levels[i] != 0) {
         vmw_error("Unexpected extra faces on shared surface."
                   " SID %d, face %d present.\n",
                   handle, i);
         goto out_mip;
      }
   }

   vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
   if (!vsrf)
      goto out_mip;

   pipe_reference_init(&vsrf->refcnt, 1);
   p_atomic_set(&vsrf->validated, 0);
   vsrf->screen = vws;
   vsrf->sid = handle;
   ssrf = svga_winsys_surface(vsrf);
   *format = rep->format;

   /* Estimate usage, for early flushing. */
   base_size.width = size.width;
   base_size.height = size.height;
   base_size.depth = size.depth;
   vsrf->size = svga3dsurface_get_serialized_size(rep->format, base_size,
                                                  rep->mip_levels[0],
                                                  FALSE);

   return ssrf;

out_mip:
   vmw_ioctl_surface_destroy(vws, handle);
   return NULL;
}
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride,
                                                      unsigned *size)
{
   struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
   struct radeon_bo *bo;
   struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
   struct drm_gem_open open_arg = {};

   /* We must maintain a list of pairs <handle, bo>, so that we always return
    * the same BO for one particular handle. If we didn't do that and created
    * more than one BO for the same handle and then relocated them in a CS,
    * we would hit a deadlock in the kernel.
    *
    * The list of pairs is guarded by a mutex, of course. */
   pipe_mutex_lock(mgr->bo_handles_mutex);

   /* First check if there already is an existing bo for the handle. */
   bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
   if (bo) {
      /* Increase the refcount. */
      struct pb_buffer *b = NULL;
      pb_reference(&b, &bo->base);
      goto done;
   }

   /* There isn't, create a new one. */
   bo = CALLOC_STRUCT(radeon_bo);
   if (!bo) {
      goto fail;
   }

   /* Open the BO. */
   open_arg.name = whandle->handle;
   if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
      FREE(bo);
      goto fail;
   }
   bo->handle = open_arg.handle;
   bo->size = open_arg.size;
   bo->name = whandle->handle;

   /* Initialize it. */
   pipe_reference_init(&bo->base.base.reference, 1);
   bo->base.base.alignment = 0;
   bo->base.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->base.base.size = bo->size;
   bo->base.vtbl = &radeon_bo_vtbl;
   bo->mgr = mgr;
   bo->rws = mgr->rws;
   pipe_mutex_init(bo->map_mutex);

   util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
   pipe_mutex_unlock(mgr->bo_handles_mutex);

   if (stride)
      *stride = whandle->stride;
   if (size)
      *size = bo->base.base.size;

   return (struct pb_buffer*)bo;

fail:
   pipe_mutex_unlock(mgr->bo_handles_mutex);
   return NULL;
}
struct pb_manager *
pool_bufmgr_create(struct pb_manager *provider,
                   pb_size numBufs,
                   pb_size bufSize,
                   const struct pb_desc *desc)
{
   struct pool_pb_manager *pool;
   struct pool_buffer *pool_buf;
   pb_size i;

   if (!provider)
      return NULL;

   pool = CALLOC_STRUCT(pool_pb_manager);
   if (!pool)
      return NULL;

   pool->base.destroy = pool_bufmgr_destroy;
   pool->base.create_buffer = pool_bufmgr_create_buffer;
   pool->base.flush = pool_bufmgr_flush;

   LIST_INITHEAD(&pool->free);

   pool->numTot = numBufs;
   pool->numFree = numBufs;
   pool->bufSize = bufSize;
   pool->bufAlign = desc->alignment;

   pipe_mutex_init(pool->mutex);

   pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc);
   if (!pool->buffer)
      goto failure;

   pool->map = pb_map(pool->buffer,
                      PB_USAGE_CPU_READ |
                      PB_USAGE_CPU_WRITE, NULL);
   if (!pool->map)
      goto failure;

   pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs));
   if (!pool->bufs)
      goto failure;

   pool_buf = pool->bufs;
   for (i = 0; i < numBufs; ++i) {
      pipe_reference_init(&pool_buf->base.reference, 0);
      pool_buf->base.alignment = 0;
      pool_buf->base.usage = 0;
      pool_buf->base.size = bufSize;
      pool_buf->base.vtbl = &pool_buffer_vtbl;
      pool_buf->mgr = pool;
      pool_buf->start = i * bufSize;
      LIST_ADDTAIL(&pool_buf->head, &pool->free);
      pool_buf++;
   }

   return SUPER(pool);

failure:
   if (pool->bufs)
      FREE(pool->bufs);
   if (pool->map)
      pb_unmap(pool->buffer);
   if (pool->buffer)
      pb_reference(&pool->buffer, NULL);
   FREE(pool);
   return NULL;
}
static struct svga_winsys_surface *
vmw_svga_winsys_surface_create(struct svga_winsys_screen *sws,
                               SVGA3dSurfaceFlags flags,
                               SVGA3dSurfaceFormat format,
                               unsigned usage,
                               SVGA3dSize size,
                               uint32 numLayers,
                               uint32 numMipLevels,
                               unsigned sampleCount)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_surface *surface;
   struct vmw_buffer_desc desc;
   struct pb_manager *provider;
   uint32_t buffer_size;

   memset(&desc, 0, sizeof(desc));
   surface = CALLOC_STRUCT(vmw_svga_winsys_surface);
   if (!surface)
      goto no_surface;

   pipe_reference_init(&surface->refcnt, 1);
   p_atomic_set(&surface->validated, 0);
   surface->screen = vws;
   pipe_mutex_init(surface->mutex);
   surface->shared = !!(usage & SVGA_SURFACE_USAGE_SHARED);
   provider = (surface->shared) ? vws->pools.gmr : vws->pools.mob_fenced;

   /*
    * Used for the backing buffer GB surfaces, and to approximate
    * when to flush on non-GB hosts.
    */
   buffer_size = svga3dsurface_get_serialized_size(format, size,
                                                   numMipLevels, numLayers);
   if (flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
      buffer_size += sizeof(SVGA3dDXSOState);

   if (buffer_size > vws->ioctl.max_texture_size) {
      goto no_sid;
   }

   if (sws->have_gb_objects) {
      SVGAGuestPtr ptr = {0, 0};

      /*
       * If the backing buffer size is small enough, try to allocate a
       * buffer out of the buffer cache. Otherwise, let the kernel allocate
       * a suitable buffer for us.
       */
      if (buffer_size < VMW_TRY_CACHED_SIZE && !surface->shared) {
         struct pb_buffer *pb_buf;

         surface->size = buffer_size;
         desc.pb_desc.alignment = 4096;
         desc.pb_desc.usage = 0;
         pb_buf = provider->create_buffer(provider, buffer_size,
                                          &desc.pb_desc);
         surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
         if (surface->buf && !vmw_gmr_bufmgr_region_ptr(pb_buf, &ptr))
            assert(0);
      }

      surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
                                                 size, numLayers,
                                                 numMipLevels, sampleCount,
                                                 ptr.gmrId,
                                                 surface->buf ? NULL :
                                                 &desc.region);

      if (surface->sid == SVGA3D_INVALID_ID && surface->buf) {
         /*
          * Kernel refused to allocate a surface for us.
          * Perhaps something was wrong with our buffer?
          * This is really a guard against future new size requirements
          * on the backing buffers.
          */
         vmw_svga_winsys_buffer_destroy(sws, surface->buf);
         surface->buf = NULL;
         surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format,
                                                    usage, size, numLayers,
                                                    numMipLevels, sampleCount,
                                                    0, &desc.region);
         if (surface->sid == SVGA3D_INVALID_ID)
            goto no_sid;
      }

      /*
       * If the kernel created the buffer for us, wrap it into a
       * vmw_svga_winsys_buffer.
       */
      if (surface->buf == NULL) {
         struct pb_buffer *pb_buf;

         surface->size = vmw_region_size(desc.region);
         desc.pb_desc.alignment = 4096;
         desc.pb_desc.usage = VMW_BUFFER_USAGE_SHARED;
         pb_buf = provider->create_buffer(provider, surface->size,
                                          &desc.pb_desc);
         surface->buf = vmw_svga_winsys_buffer_wrap(pb_buf);
         if (surface->buf == NULL) {
            vmw_ioctl_region_destroy(desc.region);
            vmw_ioctl_surface_destroy(vws, surface->sid);
            goto no_sid;
         }
      }
   } else {
      surface->sid = vmw_ioctl_surface_create(vws, flags, format, usage,
                                              size, numLayers, numMipLevels,
                                              sampleCount);
      if (surface->sid == SVGA3D_INVALID_ID)
         goto no_sid;

      /* Best estimate for surface size, used for early flushing. */
      surface->size = buffer_size;
      surface->buf = NULL;
   }

   return svga_winsys_surface(surface);

no_sid:
   if (surface->buf)
      vmw_svga_winsys_buffer_destroy(sws, surface->buf);

   FREE(surface);
no_surface:
   return NULL;
}
static struct pipe_surface *
svga_get_tex_surface(struct pipe_screen *screen,
                     struct pipe_resource *pt,
                     unsigned face, unsigned level, unsigned zslice,
                     unsigned flags)
{
   struct svga_texture *tex = svga_texture(pt);
   struct svga_surface *s;
   boolean render = (flags & (PIPE_BIND_RENDER_TARGET |
                              PIPE_BIND_DEPTH_STENCIL)) ? TRUE : FALSE;
   boolean view = FALSE;
   SVGA3dSurfaceFormat format;

   s = CALLOC_STRUCT(svga_surface);
   if (!s)
      return NULL;

   pipe_reference_init(&s->base.reference, 1);
   pipe_resource_reference(&s->base.texture, pt);
   s->base.format = pt->format;
   s->base.width = u_minify(pt->width0, level);
   s->base.height = u_minify(pt->height0, level);
   s->base.usage = flags;
   s->base.level = level;
   s->base.face = face;
   s->base.zslice = zslice;

   if (!render)
      format = svga_translate_format(pt->format);
   else
      format = svga_translate_format_render(pt->format);

   assert(format != SVGA3D_FORMAT_INVALID);

   if (svga_screen(screen)->debug.force_surface_view)
      view = TRUE;

   /* Currently only used for compressed textures */
   if (render && format != svga_translate_format(pt->format)) {
      view = TRUE;
   }

   if (level != 0 && svga_screen(screen)->debug.force_level_surface_view)
      view = TRUE;

   if (pt->target == PIPE_TEXTURE_3D)
      view = TRUE;

   if (svga_screen(screen)->debug.no_surface_view)
      view = FALSE;

   if (view) {
      SVGA_DBG(DEBUG_VIEWS,
               "svga: Surface view: yes %p, level %u face %u z %u, %p\n",
               pt, level, face, zslice, s);

      s->handle = svga_texture_view_surface(NULL, tex, format, level, 1,
                                            face, zslice, &s->key);
      s->real_face = 0;
      s->real_level = 0;
      s->real_zslice = 0;
   } else {
      SVGA_DBG(DEBUG_VIEWS,
               "svga: Surface view: no %p, level %u, face %u, z %u, %p\n",
               pt, level, face, zslice, s);

      memset(&s->key, 0, sizeof s->key);
      s->handle = tex->handle;
      s->real_face = face;
      s->real_level = level;
      s->real_zslice = zslice;
   }

   return &s->base;
}