Example #1
static void
etna_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
      unsigned num_buffers, const struct pipe_vertex_buffer *vb)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_vertexbuf_state *so = &ctx->vertex_buffer;

   util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, num_buffers);
   so->count = util_last_bit(so->enabled_mask);

   for (unsigned idx = start_slot; idx < start_slot + num_buffers; ++idx) {
      struct compiled_set_vertex_buffer *cs = &so->cvb[idx];
      struct pipe_vertex_buffer *vbi = &so->vb[idx];

      assert(!vbi->is_user_buffer); /* XXX support user_buffer using
                                       etna_usermem_map */

      if (vbi->buffer.resource) { /* GPU buffer */
         cs->FE_VERTEX_STREAM_BASE_ADDR.bo = etna_resource(vbi->buffer.resource)->bo;
         cs->FE_VERTEX_STREAM_BASE_ADDR.offset = vbi->buffer_offset;
         cs->FE_VERTEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
         cs->FE_VERTEX_STREAM_CONTROL =
            FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(vbi->stride);
      } else {
         cs->FE_VERTEX_STREAM_BASE_ADDR.bo = NULL;
         cs->FE_VERTEX_STREAM_CONTROL = 0;
      }
   }

   ctx->dirty |= ETNA_DIRTY_VERTEX_BUFFERS;
}
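
All of the examples here funnel the real bookkeeping through Mesa's
util_set_vertex_buffers_mask helper (u_helpers). For reference, the
following is a minimal sketch of that helper's contract, written against
the older pipe_vertex_buffer layout (buffer, user_buffer, stride,
buffer_offset) used by Examples #2 and #4; it illustrates the semantics
rather than reproducing the exact Mesa source.

/* Sketch: copy `count` bindings from src into dst[start_slot..] and keep
 * *enabled_buffers as a bitmask of slots that hold a buffer.  A NULL src
 * unbinds the whole range.  Assumes start_slot + count <= 32. */
void
util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
                             uint32_t *enabled_buffers,
                             const struct pipe_vertex_buffer *src,
                             unsigned start_slot, unsigned count)
{
   uint32_t bitmask = 0;
   unsigned i;

   dst += start_slot;

   if (src) {
      for (i = 0; i < count; i++) {
         if (src[i].buffer || src[i].user_buffer)
            bitmask |= 1u << i;

         /* Reference the new GPU resource and drop the old one. */
         pipe_resource_reference(&dst[i].buffer, src[i].buffer);
         dst[i].user_buffer = src[i].user_buffer;
         dst[i].buffer_offset = src[i].buffer_offset;
         dst[i].stride = src[i].stride;
      }
   } else {
      for (i = 0; i < count; i++)
         pipe_resource_reference(&dst[i].buffer, NULL);
   }

   /* Clear the bits covering this range, then merge in the new ones. */
   *enabled_buffers &= ~(((1ull << count) - 1) << start_slot);
   *enabled_buffers |= bitmask << start_slot;
}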
Example #2
static void
fd_set_vertex_buffers(struct pipe_context *pctx,
		unsigned start_slot, unsigned count,
		const struct pipe_vertex_buffer *vb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_vertexbuf_stateobj *so = &ctx->vertexbuf;
	unsigned i;

	/* on a2xx, pitch is encoded in the vtx fetch instruction, so
	 * we need to mark VTXSTATE as dirty as well to trigger patching
	 * and re-emitting the vtx shader:
	 */
	for (i = 0; i < count; i++) {
		bool new_enabled = vb && (vb[i].buffer || vb[i].user_buffer);
		bool old_enabled = so->vb[i].buffer || so->vb[i].user_buffer;
		uint32_t new_stride = vb ? vb[i].stride : 0;
		uint32_t old_stride = so->vb[i].stride;
		if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
			ctx->dirty |= FD_DIRTY_VTXSTATE;
			break;
		}
	}

	util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, count);
	so->count = util_last_bit(so->enabled_mask);

	ctx->dirty |= FD_DIRTY_VTXBUF;
}
Example #3
static void
vc4_set_vertex_buffers(struct pipe_context *pctx,
                       unsigned start_slot, unsigned count,
                       const struct pipe_vertex_buffer *vb)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_vertexbuf_stateobj *so = &vc4->vertexbuf;

        util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb,
                                     start_slot, count);
        so->count = util_last_bit(so->enabled_mask);

        vc4->dirty |= VC4_DIRTY_VTXBUF;
}
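
All three drivers above then recompute so->count as
util_last_bit(so->enabled_mask): one past the highest bound slot, so the
count also shrinks when trailing buffers are unbound.  A minimal sketch of
util_last_bit, assuming a GCC-style __builtin_clz (Mesa's u_math.h carries
fallbacks for compilers without it):

/* Returns one plus the index of the most significant set bit,
 * or zero when no bit is set; e.g. util_last_bit(0b1011) == 4. */
static inline unsigned
util_last_bit(unsigned u)
{
   return u == 0 ? 0 : 32 - __builtin_clz(u);
}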
Example #4
/**
 * Same as util_set_vertex_buffers_mask, but it takes and updates a buffer
 * count (*dst_count) instead of an enabled-buffer bitmask.
 */
void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
                                   unsigned *dst_count,
                                   const struct pipe_vertex_buffer *src,
                                   unsigned start_slot, unsigned count)
{
   unsigned i;
   uint32_t enabled_buffers = 0;

   for (i = 0; i < *dst_count; i++) {
      if (dst[i].buffer || dst[i].user_buffer)
         enabled_buffers |= (1ull << i);
   }

   util_set_vertex_buffers_mask(dst, &enabled_buffers, src, start_slot,
                                count);

   *dst_count = util_last_bit(enabled_buffers);
}
Example #5
static void
ilo_set_vertex_buffers(struct pipe_context *pipe,
                       unsigned start_slot, unsigned num_buffers,
                       const struct pipe_vertex_buffer *buffers)
{
   struct ilo_state_vector *vec = &ilo_context(pipe)->state_vector;
   unsigned i;

   /* no PIPE_CAP_USER_VERTEX_BUFFERS */
   if (buffers) {
      for (i = 0; i < num_buffers; i++)
         assert(!buffers[i].user_buffer);
   }

   util_set_vertex_buffers_mask(vec->vb.states,
         &vec->vb.enabled_mask, buffers, start_slot, num_buffers);

   vec->dirty |= ILO_DIRTY_VB;
}
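
For contrast with the mask-tracking drivers above, a driver that stores only
a bound-buffer count can use util_set_vertex_buffers_count from Example #4.
A hypothetical caller might look like the sketch below; the xyz_* names and
the dirty flag are illustrative, not taken from any real driver:

static void
xyz_set_vertex_buffers(struct pipe_context *pctx,
                       unsigned start_slot, unsigned count,
                       const struct pipe_vertex_buffer *vb)
{
   struct xyz_context *ctx = xyz_context(pctx);   /* hypothetical driver */

   /* Rebuilds the enabled mask from the saved bindings, applies the new
    * range, and writes the resulting bound-buffer count back in place. */
   util_set_vertex_buffers_count(ctx->vtx.vb, &ctx->vtx.count,
                                 vb, start_slot, count);

   ctx->dirty |= XYZ_DIRTY_VERTEX_BUFFERS;        /* hypothetical flag */
}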