static void finalize_cbuf_state(struct ilo_context *ilo, struct ilo_cbuf_state *cbuf, const struct ilo_shader_state *sh) { uint32_t upload_mask = cbuf->enabled_mask; /* skip CBUF0 if the kernel does not need it */ upload_mask &= ~ilo_shader_get_kernel_param(sh, ILO_KERNEL_SKIP_CBUF0_UPLOAD); while (upload_mask) { const enum pipe_format elem_format = PIPE_FORMAT_R32G32B32A32_FLOAT; unsigned offset, i; i = u_bit_scan(&upload_mask); /* no need to upload */ if (cbuf->cso[i].resource) continue; u_upload_data(ilo->uploader, 0, cbuf->cso[i].user_buffer_size, cbuf->cso[i].user_buffer, &offset, &cbuf->cso[i].resource); ilo_gpe_init_view_surface_for_buffer(ilo->dev, ilo_buffer(cbuf->cso[i].resource), offset, cbuf->cso[i].user_buffer_size, util_format_get_blocksize(elem_format), elem_format, false, false, &cbuf->cso[i].surface); ilo->state_vector.dirty |= ILO_DIRTY_CBUF; } }
static void gen6_emit_launch_grid_surface_const(struct ilo_render *r, const struct ilo_state_vector *vec, struct ilo_render_launch_grid_session *session) { const struct ilo_shader_state *cs = vec->cs; uint32_t *surface_state = r->state.cs.SURFACE_STATE; struct ilo_view_surface view; int base, count; ILO_DEV_ASSERT(r->dev, 7, 7.5); base = ilo_shader_get_kernel_param(cs, ILO_KERNEL_SURFACE_CONST_BASE); count = ilo_shader_get_kernel_param(cs, ILO_KERNEL_SURFACE_CONST_COUNT); if (!count) return; ilo_gpe_init_view_surface_for_buffer(r->dev, ilo_buffer(session->input->buffer), session->input->buffer_offset, session->input->buffer_size, 1, PIPE_FORMAT_NONE, false, false, &view); assert(count == 1 && session->input->buffer); surface_state[base] = gen6_SURFACE_STATE(r->builder, &view, false); }
static struct pipe_sampler_view * ilo_create_sampler_view(struct pipe_context *pipe, struct pipe_resource *res, const struct pipe_sampler_view *templ) { const struct ilo_dev_info *dev = ilo_context(pipe)->dev; struct ilo_view_cso *view; view = MALLOC_STRUCT(ilo_view_cso); assert(view); view->base = *templ; pipe_reference_init(&view->base.reference, 1); view->base.texture = NULL; pipe_resource_reference(&view->base.texture, res); view->base.context = pipe; if (res->target == PIPE_BUFFER) { const unsigned elem_size = util_format_get_blocksize(templ->format); const unsigned first_elem = templ->u.buf.first_element; const unsigned num_elems = templ->u.buf.last_element - first_elem + 1; ilo_gpe_init_view_surface_for_buffer(dev, ilo_buffer(res), first_elem * elem_size, num_elems * elem_size, elem_size, templ->format, false, false, &view->surface); } else { struct ilo_texture *tex = ilo_texture(res); /* warn about degraded performance because of a missing binding flag */ if (tex->layout.tiling == INTEL_TILING_NONE && !(tex->base.bind & PIPE_BIND_SAMPLER_VIEW)) { ilo_warn("creating sampler view for a resource " "not created for sampling\n"); } ilo_gpe_init_view_surface_for_texture(dev, tex, templ->format, templ->u.tex.first_level, templ->u.tex.last_level - templ->u.tex.first_level + 1, templ->u.tex.first_layer, templ->u.tex.last_layer - templ->u.tex.first_layer + 1, false, &view->surface); } return &view->base; }
static void gen6_emit_launch_grid_surface_global(struct ilo_render *r, const struct ilo_state_vector *vec, struct ilo_render_launch_grid_session *session) { const struct ilo_shader_state *cs = vec->cs; const struct ilo_global_binding_cso *bindings = util_dynarray_begin(&vec->global_binding.bindings); uint32_t *surface_state = r->state.cs.SURFACE_STATE; int base, count, i; ILO_DEV_ASSERT(r->dev, 7, 7.5); base = ilo_shader_get_kernel_param(cs, ILO_KERNEL_CS_SURFACE_GLOBAL_BASE); count = ilo_shader_get_kernel_param(cs, ILO_KERNEL_CS_SURFACE_GLOBAL_COUNT); if (!count) return; if (base + count > Elements(r->state.cs.SURFACE_STATE)) { ilo_warn("too many global bindings\n"); count = Elements(r->state.cs.SURFACE_STATE) - base; } /* SURFACE_STATEs for global bindings */ surface_state += base; for (i = 0; i < count; i++) { if (i < vec->global_binding.count && bindings[i].resource) { const struct ilo_buffer *buf = ilo_buffer(bindings[i].resource); struct ilo_view_surface view; assert(bindings[i].resource->target == PIPE_BUFFER); ilo_gpe_init_view_surface_for_buffer(r->dev, buf, 0, buf->bo_size, 1, PIPE_FORMAT_NONE, true, true, &view); surface_state[i] = gen6_SURFACE_STATE(r->builder, &view, true); } else { surface_state[i] = 0; } } }
/*
 * Bind a constant buffer (gallium pipe_context::set_constant_buffer hook).
 *
 * Updates slot `index` of the per-shader cbuf state: a real resource gets a
 * view surface immediately, a user buffer is recorded for deferred upload,
 * and a NULL binding clears the slot.  Marks the CBUF state dirty.
 */
static void
ilo_set_constant_buffer(struct pipe_context *pipe,
                        uint shader, uint index,
                        struct pipe_constant_buffer *buf)
{
   const struct ilo_dev_info *dev = ilo_context(pipe)->dev;
   struct ilo_state_vector *vec = &ilo_context(pipe)->state_vector;
   struct ilo_cbuf_state *cbuf = &vec->cbuf[shader];
   const unsigned count = 1;
   unsigned i;

   assert(shader < Elements(vec->cbuf));
   assert(index + count <= Elements(vec->cbuf[shader].cso));

   for (i = 0; i < count; i++) {
      const unsigned slot = index + i;
      struct ilo_cbuf_cso *cso = &cbuf->cso[slot];

      /* release the previous resource; take a reference on the new one */
      pipe_resource_reference(&cso->resource, buf ? buf[i].buffer : NULL);

      if (buf && buf[i].buffer) {
         const enum pipe_format fmt = PIPE_FORMAT_R32G32B32A32_FLOAT;

         ilo_gpe_init_view_surface_for_buffer(dev,
               ilo_buffer(buf[i].buffer),
               buf[i].buffer_offset, buf[i].buffer_size,
               util_format_get_blocksize(fmt), fmt,
               false, false, &cso->surface);

         cso->user_buffer = NULL;
         cso->user_buffer_size = 0;

         cbuf->enabled_mask |= 1 << slot;
      }
      else if (buf && buf[i].user_buffer) {
         cso->surface.bo = NULL;

         /* buffer_offset does not apply for user buffer */
         cso->user_buffer = buf[i].user_buffer;
         cso->user_buffer_size = buf[i].buffer_size;

         cbuf->enabled_mask |= 1 << slot;
      }
      else {
         cso->surface.bo = NULL;
         cso->user_buffer = NULL;
         cso->user_buffer_size = 0;

         cbuf->enabled_mask &= ~(1 << slot);
      }
   }

   vec->dirty |= ILO_DIRTY_CBUF;
}