static void
ilo_fence_reference(struct pipe_screen *screen,
                    struct pipe_fence_handle **p,
                    struct pipe_fence_handle *f)
{
   struct ilo_fence **ptr = (struct ilo_fence **) p;
   struct ilo_fence *fence = ilo_fence(f);

   if (!ptr) {
      /* still need to reference fence */
      if (fence)
         pipe_reference(NULL, &fence->reference);
      return;
   }

   /* reference fence and dereference the one pointed to by ptr */
   if (*ptr && pipe_reference(&(*ptr)->reference, &fence->reference)) {
      struct ilo_fence *old = *ptr;

      if (old->bo)
         intel_bo_unreference(old->bo);
      FREE(old);
   }

   *ptr = fence;
}
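/*
 * A minimal sketch (not part of the driver snippets here) of the two-pointer
 * pipe_reference() contract that the reference helpers in this collection
 * rely on: it increments the new object's count, decrements the old one's,
 * and returns TRUE exactly when the old count reaches zero, leaving both the
 * destruction and the pointer store to the caller.  Either argument may be
 * NULL.  The example_obj type and helper name are hypothetical.
 */
#include "util/u_inlines.h"
#include "util/u_memory.h"

struct example_obj {
   struct pipe_reference reference;
};

static void
example_obj_reference(struct example_obj **ptr, struct example_obj *obj)
{
   struct example_obj *old = *ptr;

   if (pipe_reference(old ? &old->reference : NULL,
                      obj ? &obj->reference : NULL))
      FREE(old);   /* the last reference to the old object went away */

   *ptr = obj;     /* the caller, not pipe_reference(), updates the pointer */
}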
static struct pipe_sampler_view *
fd_sampler_view_create(struct pipe_context *pctx, struct pipe_resource *prsc,
                       const struct pipe_sampler_view *cso)
{
   struct fd_pipe_sampler_view *so = CALLOC_STRUCT(fd_pipe_sampler_view);
   struct fd_resource *rsc = fd_resource(prsc);

   if (!so)
      return NULL;

   so->base = *cso;
   pipe_reference(NULL, &prsc->reference);
   so->base.texture = prsc;
   so->base.reference.count = 1;
   so->base.context = pctx;

   so->tex_resource = rsc;
   so->fmt = fd_pipe2surface(cso->format);

   so->tex0 = SQ_TEX0_PITCH(rsc->pitch);
   so->tex2 =
      SQ_TEX2_HEIGHT(prsc->height0) |
      SQ_TEX2_WIDTH(prsc->width0);
   so->tex3 = fd_tex_swiz(cso->format, cso->swizzle_r, cso->swizzle_g,
                          cso->swizzle_b, cso->swizzle_a);

   return &so->base;
}
static struct pipe_sampler_view *
fd4_sampler_view_create(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
   struct fd4_pipe_sampler_view *so = CALLOC_STRUCT(fd4_pipe_sampler_view);
   struct fd_resource *rsc = fd_resource(prsc);
   unsigned lvl = fd_sampler_first_level(cso);
   unsigned miplevels = fd_sampler_last_level(cso) - lvl;

   if (!so)
      return NULL;

   so->base = *cso;
   pipe_reference(NULL, &prsc->reference);
   so->base.texture = prsc;
   so->base.reference.count = 1;
   so->base.context = pctx;

   so->texconst0 =
      A4XX_TEX_CONST_0_TYPE(tex_type(prsc->target)) |
      A4XX_TEX_CONST_0_FMT(fd4_pipe2tex(cso->format)) |
      A4XX_TEX_CONST_0_MIPLVLS(miplevels) |
      fd4_tex_swiz(cso->format, cso->swizzle_r, cso->swizzle_g,
                   cso->swizzle_b, cso->swizzle_a);

   if (util_format_is_srgb(cso->format))
      so->texconst0 |= A4XX_TEX_CONST_0_SRGB;

   so->texconst1 =
      A4XX_TEX_CONST_1_WIDTH(u_minify(prsc->width0, lvl)) |
      A4XX_TEX_CONST_1_HEIGHT(u_minify(prsc->height0, lvl));
   so->texconst2 =
      A4XX_TEX_CONST_2_FETCHSIZE(fd4_pipe2fetchsize(cso->format)) |
      A4XX_TEX_CONST_2_PITCH(rsc->slices[lvl].pitch * rsc->cpp);

   switch (prsc->target) {
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
      so->texconst3 =
         A4XX_TEX_CONST_3_DEPTH(prsc->array_size) |
         A4XX_TEX_CONST_3_LAYERSZ(rsc->layer_size);
      break;
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
      so->texconst3 =
         A4XX_TEX_CONST_3_DEPTH(prsc->array_size / 6) |
         A4XX_TEX_CONST_3_LAYERSZ(rsc->layer_size);
      break;
   case PIPE_TEXTURE_3D:
      so->texconst3 =
         A4XX_TEX_CONST_3_DEPTH(u_minify(prsc->depth0, lvl)) |
         A4XX_TEX_CONST_3_LAYERSZ(rsc->slices[0].size0);
      break;
   default:
      so->texconst3 = 0x00000000;
      break;
   }

   return &so->base;
}
static struct pipe_sampler_view *
fd3_sampler_view_create(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
   struct fd3_pipe_sampler_view *so = CALLOC_STRUCT(fd3_pipe_sampler_view);
   struct fd_resource *rsc = fd_resource(prsc);

   if (!so)
      return NULL;

   so->base = *cso;
   pipe_reference(NULL, &prsc->reference);
   so->base.texture = prsc;
   so->base.reference.count = 1;
   so->base.context = pctx;

   so->tex_resource = rsc;

   so->texconst0 =
      0x40000000 | /* ??? */
      A3XX_TEX_CONST_0_FMT(fd3_pipe2tex(cso->format)) |
      fd3_tex_swiz(cso->format, cso->swizzle_r, cso->swizzle_g,
                   cso->swizzle_b, cso->swizzle_a);
   so->texconst1 =
      A3XX_TEX_CONST_1_FETCHSIZE(fd3_pipe2fetchsize(cso->format)) |
      A3XX_TEX_CONST_1_WIDTH(prsc->width0) |
      A3XX_TEX_CONST_1_HEIGHT(prsc->height0);
   /* when emitted, A3XX_TEX_CONST_2_INDX() must be OR'd in: */
   so->texconst2 = A3XX_TEX_CONST_2_PITCH(rsc->pitch * rsc->cpp);
   so->texconst3 = 0x00000000; /* ??? */

   return &so->base;
}
static void radeon_winsys_destroy(struct radeon_winsys *rws)
{
   struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

   if (ws->thread) {
      ws->kill_thread = 1;
      pipe_semaphore_signal(&ws->cs_queued);
      pipe_thread_wait(ws->thread);
   }
   pipe_semaphore_destroy(&ws->cs_queued);
   pipe_condvar_destroy(ws->cs_queue_empty);

   if (!pipe_reference(&ws->base.reference, NULL)) {
      return;
   }

   pipe_mutex_destroy(ws->hyperz_owner_mutex);
   pipe_mutex_destroy(ws->cmask_owner_mutex);
   pipe_mutex_destroy(ws->cs_stack_lock);

   ws->cman->destroy(ws->cman);
   ws->kman->destroy(ws->kman);

   if (ws->gen >= DRV_R600) {
      radeon_surface_manager_free(ws->surf_man);
   }

   if (fd_tab) {
      util_hash_table_remove(fd_tab, intptr_to_pointer(ws->fd));
   }

   FREE(rws);
}
void
vmw_svga_winsys_surface_reference(struct vmw_svga_winsys_surface **pdst,
                                  struct vmw_svga_winsys_surface *src)
{
   struct pipe_reference *src_ref;
   struct pipe_reference *dst_ref;
   struct vmw_svga_winsys_surface *dst;

   if (pdst == NULL || *pdst == src)
      return;

   dst = *pdst;

   src_ref = src ? &src->refcnt : NULL;
   dst_ref = dst ? &dst->refcnt : NULL;

   if (pipe_reference(dst_ref, src_ref)) {
      vmw_ioctl_surface_destroy(dst->screen, dst->sid);
#ifdef DEBUG
      /* to detect dangling pointers */
      assert(p_atomic_read(&dst->validated) == 0);
      dst->sid = SVGA3D_INVALID_ID;
#endif
      FREE(dst);
   }

   *pdst = src;
}
static void
st_device_reference(struct st_device **ptr, struct st_device *st_dev)
{
   struct st_device *old_dev = *ptr;

   if (pipe_reference((struct pipe_reference **)ptr, &st_dev->reference))
      st_device_really_destroy(old_dev);
}
static void
st_device_reference(struct st_device **ptr, struct st_device *st_dev)
{
   struct st_device *old_dev = *ptr;

   if (pipe_reference(&(*ptr)->reference, &st_dev->reference))
      st_device_really_destroy(old_dev);
   *ptr = st_dev;
}
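/*
 * The two st_device_reference() variants above target different revisions of
 * the pipe_reference() helper.  The first is written against the older
 * pointer-to-pointer form, which stored the new pointer into *ptr itself;
 * the second uses the current two-pointer form, which only adjusts the
 * refcounts, so the caller must perform the `*ptr = st_dev;` store.  A
 * hedged summary of the caller-visible difference (the old signature is
 * reconstructed from memory and may differ in detail):
 *
 *   old:  boolean pipe_reference(struct pipe_reference **ptr,
 *                                struct pipe_reference *reference);
 *         -- assigns *ptr internally, returns TRUE if the old object died
 *
 *   new:  boolean pipe_reference(struct pipe_reference *ptr,
 *                                struct pipe_reference *reference);
 *         -- refcounts only; the caller destroys the dead object and
 *            stores the new pointer
 */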
void
fd_screen_fence_ref(struct pipe_screen *pscreen,
                    struct pipe_fence_handle **ptr,
                    struct pipe_fence_handle *pfence)
{
   if (pipe_reference(&(*ptr)->reference, &pfence->reference))
      FREE(*ptr);

   *ptr = pfence;
}
static void
i915_sw_fence_reference(struct i915_winsys *iws,
                        struct pipe_fence_handle **ptr,
                        struct pipe_fence_handle *fence)
{
   struct i915_sw_fence *old = (struct i915_sw_fence *)*ptr;
   struct i915_sw_fence *f = (struct i915_sw_fence *)fence;

   if (pipe_reference(&old->reference, &f->reference)) {
      FREE(old);
   }
   *ptr = fence;
}
/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
static void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   SVGA3dCopyBox *boxes;
   unsigned i;

   assert(sbuf->handle);
   assert(sbuf->hwbuf);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);
   assert(sbuf->dma.boxes);

   /*
    * Patch the DMA command with the final copy box.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->dma.boxes;
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
               sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

      boxes[i].x = sbuf->map.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->map.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL;
#endif
   sbuf->dma.pending = FALSE;
   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;

   /* Decrement reference count */
   pipe_reference(&(sbuf->b.b.reference), NULL);
   sbuf = NULL;
}
static void
intel_drm_fence_reference(struct intel_winsys *iws,
                          struct pipe_fence_handle **ptr,
                          struct pipe_fence_handle *fence)
{
   struct intel_drm_fence *old = (struct intel_drm_fence *)*ptr;
   struct intel_drm_fence *f = (struct intel_drm_fence *)fence;

   if (pipe_reference((struct pipe_reference**)ptr, &f->reference)) {
      if (old->bo)
         drm_intel_bo_unreference(old->bo);
      FREE(old);
   }
}
static void
vc4_fence_reference(struct pipe_screen *pscreen,
                    struct pipe_fence_handle **pp,
                    struct pipe_fence_handle *pf)
{
   struct vc4_fence **p = (struct vc4_fence **)pp;
   struct vc4_fence *f = (struct vc4_fence *)pf;
   struct vc4_fence *old = *p;

   if (pipe_reference(&(*p)->reference, &f->reference)) {
      free(old);
   }
   *p = f;
}
static void
r600_fence_reference(struct pipe_screen *screen,
                     struct pipe_fence_handle **dst,
                     struct pipe_fence_handle *src)
{
   struct radeon_winsys *ws = ((struct r600_common_screen*)screen)->ws;
   struct r600_multi_fence **rdst = (struct r600_multi_fence **)dst;
   struct r600_multi_fence *rsrc = (struct r600_multi_fence *)src;

   if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
      ws->fence_reference(&(*rdst)->gfx, NULL);
      ws->fence_reference(&(*rdst)->sdma, NULL);
      FREE(*rdst);
   }
   *rdst = rsrc;
}
static void
i915_drm_fence_reference(struct i915_winsys *iws,
                         struct pipe_fence_handle **ptr,
                         struct pipe_fence_handle *fence)
{
   struct i915_drm_fence *old = (struct i915_drm_fence *)*ptr;
   struct i915_drm_fence *f = (struct i915_drm_fence *)fence;

   if (pipe_reference(&old->reference, &f->reference)) {
      if (old->bo)
         drm_intel_bo_unreference(old->bo);
      FREE(old);
   }
   *ptr = fence;
}
static struct pipe_sampler_view *
vc4_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
   struct pipe_sampler_view *so = malloc(sizeof(*so));

   if (!so)
      return NULL;

   *so = *cso;

   pipe_reference(NULL, &prsc->reference);
   so->texture = prsc;
   so->reference.count = 1;
   so->context = pctx;

   return so;
}
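/*
 * Sketch of the destructor that pairs with a malloc()'ed view like the one
 * above.  vc4's actual destroy callback is not among these snippets, so the
 * function name here is illustrative, but the body follows the standard
 * gallium pattern (compare vc5_sampler_view_destroy further down): release
 * the texture reference taken at create time, then free the view itself.
 */
static void
example_sampler_view_destroy(struct pipe_context *pctx,
                             struct pipe_sampler_view *view)
{
   pipe_resource_reference(&view->texture, NULL);
   free(view);
}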
static bool radeon_winsys_unref(struct radeon_winsys *ws)
{
   struct radeon_drm_winsys *rws = (struct radeon_drm_winsys*)ws;
   bool destroy;

   /* When the reference counter drops to zero, remove the fd from the table.
    * This must happen while the mutex is locked, so that
    * radeon_drm_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0. */
   pipe_mutex_lock(fd_tab_mutex);

   destroy = pipe_reference(&rws->reference, NULL);
   if (destroy && fd_tab)
      util_hash_table_remove(fd_tab, intptr_to_pointer(rws->fd));

   pipe_mutex_unlock(fd_tab_mutex);
   return destroy;
}
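/*
 * Hedged usage sketch for radeon_winsys_unref(): the screen teardown path is
 * expected to unref first and only call destroy when it held the last
 * reference, so the fd-table entry and the winsys go away together.  The
 * example_screen type and accessor are illustrative, not from the snippets.
 */
static void
example_screen_destroy(struct example_screen *screen)
{
   struct radeon_winsys *ws = screen->ws;

   /* ... tear down screen-level state first ... */

   if (ws->unref(ws))
      ws->destroy(ws);
}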
static void virgl_vtest_resource_reference(struct virgl_vtest_winsys *vtws,
                                           struct virgl_hw_res **dres,
                                           struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;

   if (pipe_reference(&(*dres)->reference, &sres->reference)) {
      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(vtws, old);
      } else {
         pipe_mutex_lock(vtws->mutex);
         virgl_cache_list_check_free(vtws);

         old->start = os_time_get();
         old->end = old->start + vtws->usecs;
         LIST_ADDTAIL(&old->head, &vtws->delayed);
         vtws->num_delayed++;
         pipe_mutex_unlock(vtws->mutex);
      }
   }
   *dres = sres;
}
static struct pipe_sampler_view *
vc4_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
   struct pipe_sampler_view *so = malloc(sizeof(*so));

   if (!so)
      return NULL;

   *so = *cso;

   pipe_reference(NULL, &prsc->reference);

   /* There is no hardware level clamping, and the start address of a
    * texture may be misaligned, so in that case we have to copy to a
    * temporary.
    */
   if (so->u.tex.first_level) {
      struct vc4_resource *shadow_parent = vc4_resource(prsc);
      struct pipe_resource tmpl = shadow_parent->base.b;
      struct vc4_resource *clone;

      tmpl.width0 = u_minify(tmpl.width0, so->u.tex.first_level);
      tmpl.height0 = u_minify(tmpl.height0, so->u.tex.first_level);
      tmpl.last_level = so->u.tex.last_level - so->u.tex.first_level;

      prsc = vc4_resource_create(pctx->screen, &tmpl);
      clone = vc4_resource(prsc);
      clone->shadow_parent = &shadow_parent->base.b;
      /* Flag it as needing update of the contents from the parent. */
      clone->writes = shadow_parent->writes - 1;
   }

   so->texture = prsc;
   so->reference.count = 1;
   so->context = pctx;

   return so;
}
static struct pipe_sampler_view *
fd3_sampler_view_create(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
   struct fd3_pipe_sampler_view *so = CALLOC_STRUCT(fd3_pipe_sampler_view);
   struct fd_resource *rsc = fd_resource(prsc);
   unsigned miplevels = cso->u.tex.last_level - cso->u.tex.first_level;

   if (!so)
      return NULL;

   so->base = *cso;
   pipe_reference(NULL, &prsc->reference);
   so->base.texture = prsc;
   so->base.reference.count = 1;
   so->base.context = pctx;

   so->tex_resource = rsc;
   so->mipaddrs = 1 + miplevels;

   so->texconst0 =
      A3XX_TEX_CONST_0_TYPE(tex_type(prsc->target)) |
      A3XX_TEX_CONST_0_FMT(fd3_pipe2tex(cso->format)) |
      A3XX_TEX_CONST_0_MIPLVLS(miplevels) |
      fd3_tex_swiz(cso->format, cso->swizzle_r, cso->swizzle_g,
                   cso->swizzle_b, cso->swizzle_a);
   so->texconst1 =
      A3XX_TEX_CONST_1_FETCHSIZE(fd3_pipe2fetchsize(cso->format)) |
      A3XX_TEX_CONST_1_WIDTH(prsc->width0) |
      A3XX_TEX_CONST_1_HEIGHT(prsc->height0);
   /* when emitted, A3XX_TEX_CONST_2_INDX() must be OR'd in: */
   so->texconst2 = A3XX_TEX_CONST_2_PITCH(rsc->slices[0].pitch * rsc->cpp);
   so->texconst3 = 0x00000000; /* ??? */

   return &so->base;
}
static void *
vc5_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
                        const struct pipe_vertex_element *elements)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   struct vc5_vertex_stateobj *so = CALLOC_STRUCT(vc5_vertex_stateobj);

   if (!so)
      return NULL;

   memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
   so->num_elements = num_elements;

   for (int i = 0; i < so->num_elements; i++) {
      const struct pipe_vertex_element *elem = &elements[i];
      const struct util_format_description *desc =
         util_format_description(elem->src_format);
      uint32_t r_size = desc->channel[0].size;

      struct V3D33_GL_SHADER_STATE_ATTRIBUTE_RECORD attr_unpacked = {
         /* vec_size == 0 means 4 */
         .vec_size = desc->nr_channels & 3,
         .signed_int_type = (desc->channel[0].type ==
                             UTIL_FORMAT_TYPE_SIGNED),
         .normalized_int_type = desc->channel[0].normalized,
         .read_as_int_uint = desc->channel[0].pure_integer,
         .instance_divisor = elem->instance_divisor,
      };

      switch (desc->channel[0].type) {
      case UTIL_FORMAT_TYPE_FLOAT:
         if (r_size == 32) {
            attr_unpacked.type = ATTRIBUTE_FLOAT;
         } else {
            assert(r_size == 16);
            attr_unpacked.type = ATTRIBUTE_HALF_FLOAT;
         }
         break;

      case UTIL_FORMAT_TYPE_SIGNED:
      case UTIL_FORMAT_TYPE_UNSIGNED:
         switch (r_size) {
         case 32:
            attr_unpacked.type = ATTRIBUTE_INT;
            break;
         case 16:
            attr_unpacked.type = ATTRIBUTE_SHORT;
            break;
         case 10:
            attr_unpacked.type = ATTRIBUTE_INT2_10_10_10;
            break;
         case 8:
            attr_unpacked.type = ATTRIBUTE_BYTE;
            break;
         default:
            fprintf(stderr, "format %s unsupported\n", desc->name);
            attr_unpacked.type = ATTRIBUTE_BYTE;
            abort();
         }
         break;

      default:
         fprintf(stderr, "format %s unsupported\n", desc->name);
         abort();
      }

      const uint32_t size = cl_packet_length(GL_SHADER_STATE_ATTRIBUTE_RECORD);
      V3D33_GL_SHADER_STATE_ATTRIBUTE_RECORD_pack(NULL,
                                                  (uint8_t *)&so->attrs[i * size],
                                                  &attr_unpacked);
   }

   /* Set up the default attribute values in case any of the vertex
    * elements use them.
    */
   so->default_attribute_values = vc5_bo_alloc(vc5->screen,
                                               VC5_MAX_ATTRIBUTES *
                                               4 * sizeof(float),
                                               "default attributes");
   uint32_t *attrs = vc5_bo_map(so->default_attribute_values);
   for (int i = 0; i < VC5_MAX_ATTRIBUTES; i++) {
      attrs[i * 4 + 0] = 0;
      attrs[i * 4 + 1] = 0;
      attrs[i * 4 + 2] = 0;
      if (i < so->num_elements &&
          util_format_is_pure_integer(so->pipe[i].src_format)) {
         attrs[i * 4 + 3] = 1;
      } else {
         attrs[i * 4 + 3] = fui(1.0);
      }
   }

   return so;
}

static void
vc5_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   vc5->vtx = hwcso;
   vc5->dirty |= VC5_DIRTY_VTXSTATE;
}

static void
vc5_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
                        const struct pipe_constant_buffer *cb)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   struct vc5_constbuf_stateobj *so = &vc5->constbuf[shader];

   util_copy_constant_buffer(&so->cb[index], cb);

   /* Note that the state tracker can unbind constant buffers by
    * passing NULL here.
    */
   if (unlikely(!cb)) {
      so->enabled_mask &= ~(1 << index);
      so->dirty_mask &= ~(1 << index);
      return;
   }

   so->enabled_mask |= 1 << index;
   so->dirty_mask |= 1 << index;
   vc5->dirty |= VC5_DIRTY_CONSTBUF;
}

static void
vc5_set_framebuffer_state(struct pipe_context *pctx,
                          const struct pipe_framebuffer_state *framebuffer)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   struct pipe_framebuffer_state *cso = &vc5->framebuffer;
   unsigned i;

   vc5->job = NULL;

   for (i = 0; i < framebuffer->nr_cbufs; i++)
      pipe_surface_reference(&cso->cbufs[i], framebuffer->cbufs[i]);
   for (; i < vc5->framebuffer.nr_cbufs; i++)
      pipe_surface_reference(&cso->cbufs[i], NULL);

   cso->nr_cbufs = framebuffer->nr_cbufs;

   pipe_surface_reference(&cso->zsbuf, framebuffer->zsbuf);

   cso->width = framebuffer->width;
   cso->height = framebuffer->height;

   vc5->dirty |= VC5_DIRTY_FRAMEBUFFER;
}

static struct vc5_texture_stateobj *
vc5_get_stage_tex(struct vc5_context *vc5, enum pipe_shader_type shader)
{
   switch (shader) {
   case PIPE_SHADER_FRAGMENT:
      vc5->dirty |= VC5_DIRTY_FRAGTEX;
      return &vc5->fragtex;
      break;
   case PIPE_SHADER_VERTEX:
      vc5->dirty |= VC5_DIRTY_VERTTEX;
      return &vc5->verttex;
      break;
   default:
      fprintf(stderr, "Unknown shader target %d\n", shader);
      abort();
   }
}

static uint32_t
translate_wrap(uint32_t pipe_wrap, bool using_nearest)
{
   switch (pipe_wrap) {
   case PIPE_TEX_WRAP_REPEAT:
      return 0;
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
      return 1;
   case PIPE_TEX_WRAP_MIRROR_REPEAT:
      return 2;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
      return 3;
   case PIPE_TEX_WRAP_CLAMP:
      return (using_nearest ? 1 : 3);
   default:
      unreachable("Unknown wrap mode");
   }
}

static void *
vc5_create_sampler_state(struct pipe_context *pctx,
                         const struct pipe_sampler_state *cso)
{
   struct vc5_sampler_state *so = CALLOC_STRUCT(vc5_sampler_state);

   if (!so)
      return NULL;

   memcpy(so, cso, sizeof(*cso));

   bool either_nearest =
      (cso->mag_img_filter == PIPE_TEX_MIPFILTER_NEAREST ||
       cso->min_img_filter == PIPE_TEX_MIPFILTER_NEAREST);

   struct V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1 p0_unpacked = {
      .s_wrap_mode = translate_wrap(cso->wrap_s, either_nearest),
      .t_wrap_mode = translate_wrap(cso->wrap_t, either_nearest),
      .r_wrap_mode = translate_wrap(cso->wrap_r, either_nearest),
   };
   V3D33_TEXTURE_UNIFORM_PARAMETER_0_CFG_MODE1_pack(NULL,
                                                    (uint8_t *)&so->p0,
                                                    &p0_unpacked);

   struct V3D33_TEXTURE_SHADER_STATE state_unpacked = {
      cl_packet_header(TEXTURE_SHADER_STATE),
      .min_level_of_detail = MAX2(cso->min_lod, 0.0),
      .depth_compare_function = cso->compare_func,
      .fixed_bias = cso->lod_bias,
   };
   STATIC_ASSERT(ARRAY_SIZE(so->texture_shader_state) ==
                 cl_packet_length(TEXTURE_SHADER_STATE));
   cl_packet_pack(TEXTURE_SHADER_STATE)(NULL, so->texture_shader_state,
                                        &state_unpacked);

   return so;
}

static void
vc5_sampler_states_bind(struct pipe_context *pctx,
                        enum pipe_shader_type shader, unsigned start,
                        unsigned nr, void **hwcso)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   struct vc5_texture_stateobj *stage_tex = vc5_get_stage_tex(vc5, shader);

   assert(start == 0);
   unsigned i;
   unsigned new_nr = 0;

   for (i = 0; i < nr; i++) {
      if (hwcso[i])
         new_nr = i + 1;
      stage_tex->samplers[i] = hwcso[i];
   }

   for (; i < stage_tex->num_samplers; i++) {
      stage_tex->samplers[i] = NULL;
   }

   stage_tex->num_samplers = new_nr;
}

static uint32_t
translate_swizzle(unsigned char pipe_swizzle)
{
   switch (pipe_swizzle) {
   case PIPE_SWIZZLE_0:
      return 0;
   case PIPE_SWIZZLE_1:
      return 1;
   case PIPE_SWIZZLE_X:
   case PIPE_SWIZZLE_Y:
   case PIPE_SWIZZLE_Z:
   case PIPE_SWIZZLE_W:
      return 2 + pipe_swizzle;
   default:
      unreachable("unknown swizzle");
   }
}

static struct pipe_sampler_view *
vc5_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
   struct vc5_sampler_view *so = CALLOC_STRUCT(vc5_sampler_view);
   struct vc5_resource *rsc = vc5_resource(prsc);

   if (!so)
      return NULL;

   so->base = *cso;

   pipe_reference(NULL, &prsc->reference);

   struct V3D33_TEXTURE_UNIFORM_PARAMETER_1_CFG_MODE1 unpacked = {
   };

   unpacked.return_word_0_of_texture_data = true;
   if (vc5_get_tex_return_size(cso->format) == 16) {
      unpacked.return_word_1_of_texture_data = true;
   } else {
      int chans = vc5_get_tex_return_channels(cso->format);

      if (chans > 1)
         unpacked.return_word_1_of_texture_data = true;
      if (chans > 2)
         unpacked.return_word_2_of_texture_data = true;
      if (chans > 3)
         unpacked.return_word_3_of_texture_data = true;
   }

   V3D33_TEXTURE_UNIFORM_PARAMETER_1_CFG_MODE1_pack(NULL,
                                                    (uint8_t *)&so->p1,
                                                    &unpacked);

   /* Compute the sampler view's swizzle up front. This will be plugged
    * into either the sampler (for 16-bit returns) or the shader's
    * texture key (for 32)
    */
   uint8_t view_swizzle[4] = {
      cso->swizzle_r,
      cso->swizzle_g,
      cso->swizzle_b,
      cso->swizzle_a
   };
   const uint8_t *fmt_swizzle = vc5_get_format_swizzle(so->base.format);
   util_format_compose_swizzles(fmt_swizzle, view_swizzle, so->swizzle);

   so->base.texture = prsc;
   so->base.reference.count = 1;
   so->base.context = pctx;

   struct V3D33_TEXTURE_SHADER_STATE state_unpacked = {
      cl_packet_header(TEXTURE_SHADER_STATE),

      .image_width = prsc->width0,
      .image_height = prsc->height0,
      .image_depth = prsc->depth0,

      .texture_type = rsc->tex_format,
      .srgb = util_format_is_srgb(cso->format),

      .base_level = cso->u.tex.first_level,
      .array_stride_64_byte_aligned = rsc->cube_map_stride / 64,
   };

   /* Note: Contrary to the docs, the swizzle still applies even
    * if the return size is 32.  It's just that you probably want
    * to swizzle in the shader, because you need the Y/Z/W
    * channels to be defined.
    */
   if (vc5_get_tex_return_size(cso->format) != 32) {
      state_unpacked.swizzle_r = translate_swizzle(so->swizzle[0]);
      state_unpacked.swizzle_g = translate_swizzle(so->swizzle[1]);
      state_unpacked.swizzle_b = translate_swizzle(so->swizzle[2]);
      state_unpacked.swizzle_a = translate_swizzle(so->swizzle[3]);
   } else {
      state_unpacked.swizzle_r = translate_swizzle(PIPE_SWIZZLE_X);
      state_unpacked.swizzle_g = translate_swizzle(PIPE_SWIZZLE_Y);
      state_unpacked.swizzle_b = translate_swizzle(PIPE_SWIZZLE_Z);
      state_unpacked.swizzle_a = translate_swizzle(PIPE_SWIZZLE_W);
   }

   /* XXX: While we need to use this flag to enable tiled
    * resource sharing (even a small shared buffer should be UIF,
    * not UBLINEAR or raster), this is also at the moment
    * patching up the fact that our resource layout's decisions
    * about XOR don't quite match the HW's.
    */
   switch (rsc->slices[0].tiling) {
   case VC5_TILING_UIF_NO_XOR:
   case VC5_TILING_UIF_XOR:
      state_unpacked.level_0_is_strictly_uif = true;
      state_unpacked.level_0_xor_enable = false;
      break;
   default:
      break;
   }

   STATIC_ASSERT(ARRAY_SIZE(so->texture_shader_state) ==
                 cl_packet_length(TEXTURE_SHADER_STATE));
   cl_packet_pack(TEXTURE_SHADER_STATE)(NULL, so->texture_shader_state,
                                        &state_unpacked);

   return &so->base;
}

static void
vc5_sampler_view_destroy(struct pipe_context *pctx,
                         struct pipe_sampler_view *view)
{
   pipe_resource_reference(&view->texture, NULL);
   free(view);
}

static void
vc5_set_sampler_views(struct pipe_context *pctx,
                      enum pipe_shader_type shader,
                      unsigned start, unsigned nr,
                      struct pipe_sampler_view **views)
{
   struct vc5_context *vc5 = vc5_context(pctx);
   struct vc5_texture_stateobj *stage_tex = vc5_get_stage_tex(vc5, shader);
   unsigned i;
   unsigned new_nr = 0;

   assert(start == 0);

   for (i = 0; i < nr; i++) {
      if (views[i])
         new_nr = i + 1;
      pipe_sampler_view_reference(&stage_tex->textures[i], views[i]);
   }

   for (; i < stage_tex->num_textures; i++) {
      pipe_sampler_view_reference(&stage_tex->textures[i], NULL);
   }

   stage_tex->num_textures = new_nr;
}

static struct pipe_stream_output_target *
vc5_create_stream_output_target(struct pipe_context *pctx,
                                struct pipe_resource *prsc,
                                unsigned buffer_offset,
                                unsigned buffer_size)
{
   struct pipe_stream_output_target *target;

   target = CALLOC_STRUCT(pipe_stream_output_target);
   if (!target)
      return NULL;

   pipe_reference_init(&target->reference, 1);
   pipe_resource_reference(&target->buffer, prsc);

   target->context = pctx;
   target->buffer_offset = buffer_offset;
   target->buffer_size = buffer_size;

   return target;
}

static void
vc5_stream_output_target_destroy(struct pipe_context *pctx,
                                 struct pipe_stream_output_target *target)
{
   pipe_resource_reference(&target->buffer, NULL);
   free(target);
}
static struct pipe_sampler_view *
fd5_sampler_view_create(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
   struct fd5_pipe_sampler_view *so = CALLOC_STRUCT(fd5_pipe_sampler_view);
   struct fd_resource *rsc = fd_resource(prsc);
   enum pipe_format format = cso->format;
   unsigned lvl, layers = 0;

   if (!so)
      return NULL;

   if (format == PIPE_FORMAT_X32_S8X24_UINT) {
      rsc = rsc->stencil;
      format = rsc->base.format;
   }

   so->base = *cso;
   pipe_reference(NULL, &prsc->reference);
   so->base.texture = prsc;
   so->base.reference.count = 1;
   so->base.context = pctx;

   so->texconst0 =
      A5XX_TEX_CONST_0_FMT(fd5_pipe2tex(format)) |
      A5XX_TEX_CONST_0_SAMPLES(fd_msaa_samples(prsc->nr_samples)) |
      fd5_tex_swiz(format, cso->swizzle_r, cso->swizzle_g,
                   cso->swizzle_b, cso->swizzle_a);

   /* NOTE: since we sample z24s8 using 8888_UINT format, the swizzle
    * we get isn't quite right.  Use SWAP(XYZW) as a cheap and cheerful
    * way to re-arrange things so stencil component is where the swiz
    * expects.
    *
    * Note that gallium expects stencil sampler to return (s,s,s,s)
    * which isn't quite true.  To make that happen we'd have to massage
    * the swizzle.  But in practice only the .x component is used.
    */
   if (format == PIPE_FORMAT_X24S8_UINT) {
      so->texconst0 |= A5XX_TEX_CONST_0_SWAP(XYZW);
   }

   if (util_format_is_srgb(format)) {
      if (use_astc_srgb_workaround(pctx, format))
         so->astc_srgb = true;
      so->texconst0 |= A5XX_TEX_CONST_0_SRGB;
   }

   if (cso->target == PIPE_BUFFER) {
      unsigned elements = cso->u.buf.size / util_format_get_blocksize(format);

      lvl = 0;
      so->texconst1 =
         A5XX_TEX_CONST_1_WIDTH(elements) |
         A5XX_TEX_CONST_1_HEIGHT(1);
      so->texconst2 =
         A5XX_TEX_CONST_2_FETCHSIZE(fd5_pipe2fetchsize(format)) |
         A5XX_TEX_CONST_2_PITCH(elements * rsc->cpp);
      so->offset = cso->u.buf.offset;
   } else {
      unsigned miplevels;

      lvl = fd_sampler_first_level(cso);
      miplevels = fd_sampler_last_level(cso) - lvl;
      layers = cso->u.tex.last_layer - cso->u.tex.first_layer + 1;

      so->texconst0 |= A5XX_TEX_CONST_0_MIPLVLS(miplevels);
      so->texconst1 =
         A5XX_TEX_CONST_1_WIDTH(u_minify(prsc->width0, lvl)) |
         A5XX_TEX_CONST_1_HEIGHT(u_minify(prsc->height0, lvl));
      so->texconst2 =
         A5XX_TEX_CONST_2_FETCHSIZE(fd5_pipe2fetchsize(format)) |
         A5XX_TEX_CONST_2_PITCH(
            util_format_get_nblocksx(
               format, rsc->slices[lvl].pitch) * rsc->cpp);
      so->offset = fd_resource_offset(rsc, lvl, cso->u.tex.first_layer);
   }

   so->texconst2 |= A5XX_TEX_CONST_2_TYPE(fd5_tex_type(cso->target));

   switch (cso->target) {
   case PIPE_TEXTURE_RECT:
   case PIPE_TEXTURE_1D:
   case PIPE_TEXTURE_2D:
      so->texconst3 = A5XX_TEX_CONST_3_ARRAY_PITCH(rsc->layer_size);
      so->texconst5 = A5XX_TEX_CONST_5_DEPTH(1);
      break;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
      so->texconst3 = A5XX_TEX_CONST_3_ARRAY_PITCH(rsc->layer_size);
      so->texconst5 = A5XX_TEX_CONST_5_DEPTH(layers);
      break;
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_CUBE_ARRAY:
      so->texconst3 = A5XX_TEX_CONST_3_ARRAY_PITCH(rsc->layer_size);
      so->texconst5 = A5XX_TEX_CONST_5_DEPTH(layers / 6);
      break;
   case PIPE_TEXTURE_3D:
      so->texconst3 = A5XX_TEX_CONST_3_ARRAY_PITCH(rsc->slices[lvl].size0);
      so->texconst5 = A5XX_TEX_CONST_5_DEPTH(u_minify(prsc->depth0, lvl));
      break;
   default:
      so->texconst3 = 0x00000000;
      break;
   }

   return &so->base;
}
struct radeon_winsys *radeon_drm_winsys_create(int fd)
{
   struct radeon_drm_winsys *ws;

   if (!fd_tab) {
      fd_tab = util_hash_table_create(hash_fd, compare_fd);
   }

   ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (ws) {
      pipe_reference(NULL, &ws->base.reference);
      return &ws->base;
   }

   ws = CALLOC_STRUCT(radeon_drm_winsys);
   if (!ws) {
      return NULL;
   }
   ws->fd = fd;
   util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);

   if (!do_winsys_init(ws))
      goto fail;

   /* Create managers. */
   ws->kman = radeon_bomgr_create(ws);
   if (!ws->kman)
      goto fail;
   ws->cman = pb_cache_manager_create(ws->kman, 1000000);
   if (!ws->cman)
      goto fail;

   if (ws->gen >= DRV_R600) {
      ws->surf_man = radeon_surface_manager_new(fd);
      if (!ws->surf_man)
         goto fail;
   }

   /* init reference */
   pipe_reference_init(&ws->base.reference, 1);

   /* Set functions. */
   ws->base.destroy = radeon_winsys_destroy;
   ws->base.query_info = radeon_query_info;
   ws->base.cs_request_feature = radeon_cs_request_feature;
   ws->base.surface_init = radeon_drm_winsys_surface_init;
   ws->base.surface_best = radeon_drm_winsys_surface_best;
   ws->base.query_value = radeon_query_value;

   radeon_bomgr_init_functions(ws);
   radeon_drm_cs_init_functions(ws);

   pipe_mutex_init(ws->hyperz_owner_mutex);
   pipe_mutex_init(ws->cmask_owner_mutex);
   pipe_mutex_init(ws->cs_stack_lock);

   p_atomic_set(&ws->ncs, 0);
   pipe_semaphore_init(&ws->cs_queued, 0);
   pipe_condvar_init(ws->cs_queue_empty);
   if (ws->num_cpus > 1 && debug_get_option_thread())
      ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

   return &ws->base;

fail:
   if (ws->cman)
      ws->cman->destroy(ws->cman);
   if (ws->kman)
      ws->kman->destroy(ws->kman);
   if (ws->surf_man)
      radeon_surface_manager_free(ws->surf_man);
   FREE(ws);
   return NULL;
}
PUBLIC struct radeon_winsys *
radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
   struct radeon_drm_winsys *ws;

   pipe_mutex_lock(fd_tab_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create(hash_fd, compare_fd);
   }

   ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (ws) {
      pipe_reference(NULL, &ws->reference);
      pipe_mutex_unlock(fd_tab_mutex);
      return &ws->base;
   }

   ws = CALLOC_STRUCT(radeon_drm_winsys);
   if (!ws) {
      pipe_mutex_unlock(fd_tab_mutex);
      return NULL;
   }

   ws->fd = dup(fd);

   if (!do_winsys_init(ws))
      goto fail;

   /* Create managers. */
   ws->kman = radeon_bomgr_create(ws);
   if (!ws->kman)
      goto fail;
   ws->cman = pb_cache_manager_create(ws->kman, 500000, 2.0f, 0,
                                      MIN2(ws->info.vram_size,
                                           ws->info.gart_size));
   if (!ws->cman)
      goto fail;

   if (ws->gen >= DRV_R600) {
      ws->surf_man = radeon_surface_manager_new(ws->fd);
      if (!ws->surf_man)
         goto fail;
   }

   /* init reference */
   pipe_reference_init(&ws->reference, 1);

   /* Set functions. */
   ws->base.unref = radeon_winsys_unref;
   ws->base.destroy = radeon_winsys_destroy;
   ws->base.query_info = radeon_query_info;
   ws->base.cs_request_feature = radeon_cs_request_feature;
   ws->base.query_value = radeon_query_value;
   ws->base.read_registers = radeon_read_registers;

   radeon_bomgr_init_functions(ws);
   radeon_drm_cs_init_functions(ws);
   radeon_surface_init_functions(ws);

   pipe_mutex_init(ws->hyperz_owner_mutex);
   pipe_mutex_init(ws->cmask_owner_mutex);
   pipe_mutex_init(ws->cs_stack_lock);

   ws->ncs = 0;
   pipe_semaphore_init(&ws->cs_queued, 0);
   if (ws->num_cpus > 1 && debug_get_option_thread())
      ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   ws->base.screen = screen_create(&ws->base);
   if (!ws->base.screen) {
      radeon_winsys_destroy(&ws->base);
      pipe_mutex_unlock(fd_tab_mutex);
      return NULL;
   }

   util_hash_table_set(fd_tab, intptr_to_pointer(ws->fd), ws);

   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   pipe_mutex_unlock(fd_tab_mutex);

   return &ws->base;

fail:
   pipe_mutex_unlock(fd_tab_mutex);
   if (ws->cman)
      ws->cman->destroy(ws->cman);
   if (ws->kman)
      ws->kman->destroy(ws->kman);
   if (ws->surf_man)
      radeon_surface_manager_free(ws->surf_man);
   if (ws->fd >= 0)
      close(ws->fd);
   FREE(ws);
   return NULL;
}
static struct pipe_sampler_view *
fd3_sampler_view_create(struct pipe_context *pctx, struct pipe_resource *prsc,
                        const struct pipe_sampler_view *cso)
{
   struct fd3_pipe_sampler_view *so = CALLOC_STRUCT(fd3_pipe_sampler_view);
   struct fd_resource *rsc = fd_resource(prsc);
   unsigned lvl;
   uint32_t sz2 = 0;

   if (!so)
      return NULL;

   so->base = *cso;
   pipe_reference(NULL, &prsc->reference);
   so->base.texture = prsc;
   so->base.reference.count = 1;
   so->base.context = pctx;

   so->texconst0 =
      A3XX_TEX_CONST_0_TYPE(tex_type(prsc->target)) |
      A3XX_TEX_CONST_0_FMT(fd3_pipe2tex(cso->format)) |
      fd3_tex_swiz(cso->format, cso->swizzle_r, cso->swizzle_g,
                   cso->swizzle_b, cso->swizzle_a);

   if (prsc->target == PIPE_BUFFER || util_format_is_pure_integer(cso->format))
      so->texconst0 |= A3XX_TEX_CONST_0_NOCONVERT;
   if (util_format_is_srgb(cso->format))
      so->texconst0 |= A3XX_TEX_CONST_0_SRGB;

   if (prsc->target == PIPE_BUFFER) {
      lvl = 0;
      so->texconst1 =
         A3XX_TEX_CONST_1_FETCHSIZE(fd3_pipe2fetchsize(cso->format)) |
         A3XX_TEX_CONST_1_WIDTH(cso->u.buf.size /
                                util_format_get_blocksize(cso->format)) |
         A3XX_TEX_CONST_1_HEIGHT(1);
   } else {
      unsigned miplevels;

      lvl = fd_sampler_first_level(cso);
      miplevels = fd_sampler_last_level(cso) - lvl;

      so->texconst0 |= A3XX_TEX_CONST_0_MIPLVLS(miplevels);
      so->texconst1 =
         A3XX_TEX_CONST_1_FETCHSIZE(fd3_pipe2fetchsize(cso->format)) |
         A3XX_TEX_CONST_1_WIDTH(u_minify(prsc->width0, lvl)) |
         A3XX_TEX_CONST_1_HEIGHT(u_minify(prsc->height0, lvl));
   }

   /* when emitted, A3XX_TEX_CONST_2_INDX() must be OR'd in: */
   so->texconst2 =
      A3XX_TEX_CONST_2_PITCH(fd3_pipe2nblocksx(cso->format,
                                               rsc->slices[lvl].pitch) * rsc->cpp);

   switch (prsc->target) {
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
      so->texconst3 =
         A3XX_TEX_CONST_3_DEPTH(prsc->array_size - 1) |
         A3XX_TEX_CONST_3_LAYERSZ1(rsc->slices[0].size0);
      break;
   case PIPE_TEXTURE_3D:
      so->texconst3 =
         A3XX_TEX_CONST_3_DEPTH(u_minify(prsc->depth0, lvl)) |
         A3XX_TEX_CONST_3_LAYERSZ1(rsc->slices[lvl].size0);
      while (lvl < cso->u.tex.last_level &&
             sz2 != rsc->slices[lvl+1].size0)
         sz2 = rsc->slices[++lvl].size0;
      so->texconst3 |= A3XX_TEX_CONST_3_LAYERSZ2(sz2);
      break;
   default:
      so->texconst3 = 0x00000000;
      break;
   }

   return &so->base;
}