Example #1
static void i915_set_vertex_buffers(struct pipe_context *pipe,
                                    unsigned count,
                                    const struct pipe_vertex_buffer *buffers)
{
   struct i915_context *i915 = i915_context(pipe);
   struct draw_context *draw = i915->draw;
   int i;

   util_copy_vertex_buffers(i915->saved_vertex_buffers,
                            &i915->saved_nr_vertex_buffers,
                            buffers, count);
#if 0
   /* XXX doesn't look like this is needed */
   /* unmap old */
   for (i = 0; i < i915->num_vertex_buffers; i++) {
      draw_set_mapped_vertex_buffer(draw, i, NULL);
   }
#endif

   /* pass-through to draw module */
   draw_set_vertex_buffers(draw, count, buffers);

   /* map new */
   for (i = 0; i < count; i++) {
      void *buf = i915_buffer(buffers[i].buffer)->data;
      draw_set_mapped_vertex_buffer(draw, i, buf);
   }
}
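
The #if 0 block above hints at the matching teardown step: any pointer handed to the draw module should be cleared again once the buffers are rebound or destroyed. Below is a minimal sketch of such a helper, reusing the three-argument draw_set_mapped_vertex_buffer() and the num_vertex_buffers field referenced in the #if 0 block; the helper name is hypothetical and not part of the i915 driver.

/* Hypothetical helper (not in the driver): drop the vertex buffer
 * mappings previously handed to the draw module, mirroring the #if 0
 * block above.  Uses the three-argument draw_set_mapped_vertex_buffer()
 * of this Gallium revision.
 */
static void
i915_unmap_vertex_buffers(struct i915_context *i915)
{
   struct draw_context *draw = i915->draw;
   unsigned i;

   for (i = 0; i < i915->num_vertex_buffers; i++) {
      /* NULL tells draw that the buffer contents are no longer mapped */
      draw_set_mapped_vertex_buffer(draw, i, NULL);
   }
}
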
Example #2
/**
 * Draw vertex arrays, with optional indexing, optional instancing.
 * All the other drawing functions are implemented in terms of this function.
 * Basically, map the vertex buffers (and drawing surfaces), then hand off
 * the drawing to the 'draw' module.
 */
static void
llvmpipe_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct llvmpipe_context *lp = llvmpipe_context(pipe);
   struct draw_context *draw = lp->draw;
   const void *mapped_indices = NULL;
   unsigned i;

   if (!llvmpipe_check_render_cond(lp))
      return;

   if (lp->dirty)
      llvmpipe_update_derived( lp );

   /*
    * Map vertex buffers
    */
   for (i = 0; i < lp->num_vertex_buffers; i++) {
      const void *buf = lp->vertex_buffer[i].user_buffer;
      if (!buf)
         buf = llvmpipe_resource_data(lp->vertex_buffer[i].buffer);
      draw_set_mapped_vertex_buffer(draw, i, buf);
   }

   /* Map index buffer, if present */
   if (info->indexed) {
      mapped_indices = lp->index_buffer.user_buffer;
      if (!mapped_indices)
         mapped_indices = llvmpipe_resource_data(lp->index_buffer.buffer);

      draw_set_indexes(draw,
                       (ubyte *) mapped_indices + lp->index_buffer.offset,
                       lp->index_buffer.index_size);
   }

   llvmpipe_prepare_vertex_sampling(lp,
                                    lp->num_sampler_views[PIPE_SHADER_VERTEX],
                                    lp->sampler_views[PIPE_SHADER_VERTEX]);

   /* draw! */
   draw_vbo(draw, info);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < lp->num_vertex_buffers; i++) {
      draw_set_mapped_vertex_buffer(draw, i, NULL);
   }
   if (mapped_indices) {
      draw_set_indexes(draw, NULL, 0);
   }
   llvmpipe_cleanup_vertex_sampling(lp);

   /*
    * TODO: Flush only when a user vertex/index buffer is present
    * (or even better, modify draw module to do this
    * internally when this condition is seen?)
    */
   draw_flush(draw);
}
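
The TODO at the end points at a narrower flush policy: only a draw that sourced user memory really has to be flushed before returning, because the application may overwrite that memory immediately afterwards. Below is a sketch of what such a check could look like for this version of llvmpipe, using the user_buffer fields shown above; it is an illustration under that assumption, not the upstream solution.

/* Sketch only: flush the draw module before returning when any bound
 * vertex buffer (or the index buffer of an indexed draw) points at user
 * memory, since that memory may be reused as soon as draw_vbo() returns.
 * Field names follow the llvmpipe structures used above.
 */
static void
llvmpipe_flush_if_user_buffers(struct llvmpipe_context *lp,
                               const struct pipe_draw_info *info)
{
   boolean any_user = info->indexed && lp->index_buffer.user_buffer != NULL;
   unsigned i;

   for (i = 0; i < lp->num_vertex_buffers && !any_user; i++) {
      if (lp->vertex_buffer[i].user_buffer)
         any_user = TRUE;
   }

   if (any_user)
      draw_flush(lp->draw);
}
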
Example #3
/* SW TCL arrays, using Draw. */
boolean r300_swtcl_draw_arrays(struct pipe_context* pipe,
                               unsigned mode,
                               unsigned start,
                               unsigned count)
{
    struct r300_context* r300 = r300_context(pipe);
    int i;

    if (!u_trim_pipe_prim(mode, &count)) {
        return FALSE;
    }

    for (i = 0; i < r300->vertex_buffer_count; i++) {
        void* buf = pipe_buffer_map(pipe->screen,
                                    r300->vertex_buffer[i].buffer,
                                    PIPE_BUFFER_USAGE_CPU_READ);
        draw_set_mapped_vertex_buffer(r300->draw, i, buf);
    }

    draw_set_mapped_element_buffer(r300->draw, 0, NULL);

    draw_set_mapped_constant_buffer(r300->draw,
            r300->shader_constants[PIPE_SHADER_VERTEX].constants,
            r300->shader_constants[PIPE_SHADER_VERTEX].count *
                (sizeof(float) * 4));

    draw_arrays(r300->draw, mode, start, count);

    for (i = 0; i < r300->vertex_buffer_count; i++) {
        pipe_buffer_unmap(pipe->screen, r300->vertex_buffer[i].buffer);
        draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
    }

    return TRUE;
}
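
u_trim_pipe_prim() deserves a note: it rounds count down to a whole number of primitives for the given mode and returns FALSE when not even one primitive can be formed, which is why the function above bails out before mapping anything. A small illustrative check, assuming the u_trim_pipe_prim() declared in util/u_prim.h:

#include "util/u_prim.h"

/* Illustrative only: 4 vertices of PIPE_PRIM_TRIANGLES are trimmed down
 * to 3 (one whole triangle), while 2 vertices would make
 * u_trim_pipe_prim() return FALSE and the draw would be skipped.
 */
static boolean
trim_example(void)
{
   unsigned count = 4;

   if (!u_trim_pipe_prim(PIPE_PRIM_TRIANGLES, &count))
      return FALSE;

   return count == 3;
}
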
Example #4
/* SW TCL elements, using Draw. */
static void r300_swtcl_draw_vbo(struct pipe_context* pipe,
                                const struct pipe_draw_info *info)
{
    struct r300_context* r300 = r300_context(pipe);
    struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
    struct pipe_transfer *ib_transfer = NULL;
    int i;
    void *indices = NULL;
    boolean indexed = info->indexed && r300->vbuf_mgr->index_buffer.buffer;

    if (r300->skip_rendering) {
        return;
    }

    r300_update_derived_state(r300);

    r300_reserve_cs_dwords(r300,
            PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL |
            (indexed ? PREP_INDEXED : 0),
            indexed ? 256 : 6);

    for (i = 0; i < r300->vbuf_mgr->nr_vertex_buffers; i++) {
        if (r300->vbuf_mgr->vertex_buffer[i].buffer) {
            void *buf = pipe_buffer_map(pipe,
                                  r300->vbuf_mgr->vertex_buffer[i].buffer,
                                  PIPE_TRANSFER_READ |
                                  PIPE_TRANSFER_UNSYNCHRONIZED,
                                  &vb_transfer[i]);
            draw_set_mapped_vertex_buffer(r300->draw, i, buf);
        }
    }

    if (indexed) {
        indices = pipe_buffer_map(pipe, r300->vbuf_mgr->index_buffer.buffer,
                                  PIPE_TRANSFER_READ |
                                  PIPE_TRANSFER_UNSYNCHRONIZED, &ib_transfer);
    }

    draw_set_mapped_index_buffer(r300->draw, indices);

    r300->draw_vbo_locked = TRUE;
    r300->draw_first_emitted = FALSE;
    draw_vbo(r300->draw, info);
    draw_flush(r300->draw);
    r300->draw_vbo_locked = FALSE;

    for (i = 0; i < r300->vbuf_mgr->nr_vertex_buffers; i++) {
        if (r300->vbuf_mgr->vertex_buffer[i].buffer) {
            pipe_buffer_unmap(pipe, vb_transfer[i]);
            draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
        }
    }

    if (indexed) {
        pipe_buffer_unmap(pipe, ib_transfer);
        draw_set_mapped_index_buffer(r300->draw, NULL);
    }
}
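
Compared with the screen-based mapping in Example #3, this path maps through the context and has to keep every pipe_transfer handle returned by pipe_buffer_map() so the exact same handle can be passed back to pipe_buffer_unmap() after the draw. A condensed sketch of that bracket under the same interface; the helper name and the vb/nr parameters are placeholders, not r300 fields.

/* Placeholder helper sketching the map/draw/unmap bracket with transfer
 * handles; vb/nr stand in for a driver's vertex buffer array and count.
 */
static void
swtcl_draw_with_transfers(struct pipe_context *pipe,
                          struct draw_context *draw,
                          struct pipe_vertex_buffer *vb, unsigned nr,
                          const struct pipe_draw_info *info)
{
   struct pipe_transfer *xfer[PIPE_MAX_ATTRIBS];
   unsigned i;

   for (i = 0; i < nr; i++) {
      if (vb[i].buffer) {
         void *map = pipe_buffer_map(pipe, vb[i].buffer,
                                     PIPE_TRANSFER_READ, &xfer[i]);
         draw_set_mapped_vertex_buffer(draw, i, map);
      }
   }

   draw_vbo(draw, info);
   draw_flush(draw);

   /* unmap with the same transfer handles pipe_buffer_map() returned */
   for (i = 0; i < nr; i++) {
      if (vb[i].buffer) {
         pipe_buffer_unmap(pipe, xfer[i]);
         draw_set_mapped_vertex_buffer(draw, i, NULL);
      }
   }
}
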
Example #5
/**
 * Draw vertex arrays, with optional indexing.
 * Basically, map the vertex buffers (and drawing surfaces), then hand off
 * the drawing to the 'draw' module.
 *
 * XXX should the element buffer be specified/bound with a separate function?
 */
static void
cell_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct cell_context *cell = cell_context(pipe);
   struct draw_context *draw = cell->draw;
   void *mapped_indices = NULL;
   unsigned i;

   if (cell->dirty)
      cell_update_derived( cell );

#if 0
   cell_map_surfaces(cell);
#endif

   /*
    * Map vertex buffers
    */
   for (i = 0; i < cell->num_vertex_buffers; i++) {
      void *buf = cell_resource(cell->vertex_buffer[i].buffer)->data;
      draw_set_mapped_vertex_buffer(draw, i, buf);
   }
   /* Map index buffer, if present */
   if (info->indexed && cell->index_buffer.buffer)
      mapped_indices = cell_resource(cell->index_buffer.buffer)->data;

   draw_set_mapped_index_buffer(draw, mapped_indices);

   /* draw! */
   draw_vbo(draw, info);

   /*
    * unmap vertex/index buffers - will cause draw module to flush
    */
   for (i = 0; i < cell->num_vertex_buffers; i++) {
      draw_set_mapped_vertex_buffer(draw, i, NULL);
   }
   if (mapped_indices) {
      draw_set_mapped_index_buffer(draw, NULL);
   }

   /*
    * TODO: Flush only when a user vertex/index buffer is present
    * (or even better, modify draw module to do this
    * internally when this condition is seen?)
    */
   draw_flush(draw);
}
Example #6
void
nvfx_draw_vbo_swtnl(struct pipe_context *pipe, const struct pipe_draw_info* info)
{
	struct nvfx_context *nvfx = nvfx_context(pipe);
	unsigned i;
	void *map;

	if (!nvfx_state_validate_swtnl(nvfx))
		return;

	nvfx_state_emit(nvfx);

	/* these must be passed without adding the offsets */
	for (i = 0; i < nvfx->vtxbuf_nr; i++) {
		map = nvfx_buffer(nvfx->vtxbuf[i].buffer)->data;
		draw_set_mapped_vertex_buffer(nvfx->draw, i, map);
	}

	map = NULL;
	if (info->indexed && nvfx->idxbuf.buffer)
		map = nvfx_buffer(nvfx->idxbuf.buffer)->data;
	draw_set_mapped_index_buffer(nvfx->draw, map);

	if (nvfx->constbuf[PIPE_SHADER_VERTEX]) {
		const unsigned nr = nvfx->constbuf_nr[PIPE_SHADER_VERTEX];

		map = nvfx_buffer(nvfx->constbuf[PIPE_SHADER_VERTEX])->data;
		draw_set_mapped_constant_buffer(nvfx->draw, PIPE_SHADER_VERTEX, 0,
                                                map, nr);
	}

	draw_vbo(nvfx->draw, info);

	draw_flush(nvfx->draw);
}
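
Like most of the SW TNL paths in this collection, the vertex shader constants are handed to the draw module as a plain CPU pointer plus a size; the i915 and svga examples pass the size in bytes. A minimal sketch of that binding, with the mapped pointer and the number of float[4] constants supplied by the caller as placeholders:

/* Sketch: bind (or unbind) a CPU-visible vertex shader constant buffer.
 * n_vec4 is the number of float[4] constants; the size argument of
 * draw_set_mapped_constant_buffer() is given in bytes.
 */
static void
bind_vs_constants(struct draw_context *draw, void *map, unsigned n_vec4)
{
   if (map)
      draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0,
                                      map, n_vec4 * 4 * sizeof(float));
   else
      draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0, NULL, 0);
}
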
Example #7
enum pipe_error
svga_swtnl_draw_vbo(struct svga_context *svga,
                    const struct pipe_draw_info *info)
{
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
   struct pipe_transfer *ib_transfer = NULL;
   struct pipe_transfer *cb_transfer = NULL;
   struct draw_context *draw = svga->swtnl.draw;
   unsigned i;
   const void *map;
   enum pipe_error ret;

   assert(!svga->dirty);
   assert(svga->state.sw.need_swtnl);
   assert(draw);

   /* Make sure that the need_swtnl flag does not go away */
   svga->state.sw.in_swtnl_draw = TRUE;

   ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW);
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);
   }

   /*
    * Map vertex buffers
    */
   for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
      if (svga->curr.vb[i].buffer) {
         map = pipe_buffer_map(&svga->pipe,
                               svga->curr.vb[i].buffer,
                               PIPE_TRANSFER_READ,
                               &vb_transfer[i]);

         draw_set_mapped_vertex_buffer(draw, i, map);
      }
   }

   /* Map index buffer, if present */
   map = NULL;
   if (info->indexed && svga->curr.ib.buffer) {
      map = pipe_buffer_map(&svga->pipe, svga->curr.ib.buffer,
                            PIPE_TRANSFER_READ,
                            &ib_transfer);
      draw_set_indexes(draw,
                       (const ubyte *) map + svga->curr.ib.offset,
                       svga->curr.ib.index_size);
   }

   if (svga->curr.cb[PIPE_SHADER_VERTEX]) {
      map = pipe_buffer_map(&svga->pipe,
                            svga->curr.cb[PIPE_SHADER_VERTEX],
                            PIPE_TRANSFER_READ,
			    &cb_transfer);
      assert(map);
      draw_set_mapped_constant_buffer(
         draw, PIPE_SHADER_VERTEX, 0,
         map,
         svga->curr.cb[PIPE_SHADER_VERTEX]->width0);
   }

   draw_vbo(draw, info);

   draw_flush(svga->swtnl.draw);

   /* Ensure the draw module didn't touch this */
   assert(i == svga->curr.num_vertex_buffers);
   
   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
      if (svga->curr.vb[i].buffer) {
         pipe_buffer_unmap(&svga->pipe, vb_transfer[i]);
         draw_set_mapped_vertex_buffer(draw, i, NULL);
      }
   }

   if (ib_transfer) {
      pipe_buffer_unmap(&svga->pipe, ib_transfer);
      draw_set_indexes(draw, NULL, 0);
   }

   if (svga->curr.cb[PIPE_SHADER_VERTEX]) {
      pipe_buffer_unmap(&svga->pipe, cb_transfer);
   }

   /* Now safe to remove the need_swtnl flag in any update_state call */
   svga->state.sw.in_swtnl_draw = FALSE;
   svga->dirty |= SVGA_NEW_NEED_PIPELINE | SVGA_NEW_NEED_SWVFETCH;

   return ret;
}
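
The state update at the top is worth calling out: when svga_update_state() fails (the usual cause being a full command buffer), the context is flushed and the update retried once before the draw proceeds. A hedged sketch of that idiom as a stand-alone wrapper; the wrapper name is illustrative, not an existing svga entry point.

/* Illustrative wrapper around the flush-and-retry idiom used above.
 * A failed update is assumed to mean the command buffer filled up,
 * so flush it and try once more.
 */
static enum pipe_error
update_state_with_retry(struct svga_context *svga, unsigned level)
{
   enum pipe_error ret = svga_update_state(svga, level);

   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = svga_update_state(svga, level);
   }
   return ret;
}
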
Example #8
/**
 * Draw vertex arrays, with optional indexing, optional instancing.
 * All the other drawing functions are implemented in terms of this function.
 * Basically, map the vertex buffers (and drawing surfaces), then hand off
 * the drawing to the 'draw' module.
 */
static void
llvmpipe_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct llvmpipe_context *lp = llvmpipe_context(pipe);
   struct draw_context *draw = lp->draw;
   const void *mapped_indices = NULL;
   unsigned i;

   if (!llvmpipe_check_render_cond(lp))
      return;

   if (info->indirect) {
      util_draw_indirect(pipe, info);
      return;
   }

   if (lp->dirty)
      llvmpipe_update_derived( lp );

   /*
    * Map vertex buffers
    */
   for (i = 0; i < lp->num_vertex_buffers; i++) {
      const void *buf = lp->vertex_buffer[i].is_user_buffer ?
                           lp->vertex_buffer[i].buffer.user : NULL;
      size_t size = ~0;
      if (!buf) {
         if (!lp->vertex_buffer[i].buffer.resource) {
            continue;
         }
         buf = llvmpipe_resource_data(lp->vertex_buffer[i].buffer.resource);
         size = lp->vertex_buffer[i].buffer.resource->width0;
      }
      draw_set_mapped_vertex_buffer(draw, i, buf, size);
   }

   /* Map index buffer, if present */
   if (info->index_size) {
      unsigned available_space = ~0;
      mapped_indices = info->has_user_indices ? info->index.user : NULL;
      if (!mapped_indices) {
         mapped_indices = llvmpipe_resource_data(info->index.resource);
         available_space = info->index.resource->width0;
      }
      draw_set_indexes(draw,
                       (ubyte *) mapped_indices,
                       info->index_size, available_space);
   }

   for (i = 0; i < lp->num_so_targets; i++) {
      void *buf = 0;
      if (lp->so_targets[i]) {
         buf = llvmpipe_resource(lp->so_targets[i]->target.buffer)->data;
         lp->so_targets[i]->mapping = buf;
      }
   }
   draw_set_mapped_so_targets(draw, lp->num_so_targets,
                              lp->so_targets);

   llvmpipe_prepare_vertex_sampling(lp,
                                    lp->num_sampler_views[PIPE_SHADER_VERTEX],
                                    lp->sampler_views[PIPE_SHADER_VERTEX]);
   llvmpipe_prepare_geometry_sampling(lp,
                                      lp->num_sampler_views[PIPE_SHADER_GEOMETRY],
                                      lp->sampler_views[PIPE_SHADER_GEOMETRY]);
   if (lp->gs && lp->gs->no_tokens) {
      /* we have an empty geometry shader with stream output, so
         attach the stream output info to the current vertex shader */
      if (lp->vs) {
         draw_vs_attach_so(lp->vs, &lp->gs->stream_output);
      }
   }
   draw_collect_pipeline_statistics(draw,
                                    lp->active_statistics_queries > 0);

   /* draw! */
   draw_vbo(draw, info);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < lp->num_vertex_buffers; i++) {
      draw_set_mapped_vertex_buffer(draw, i, NULL, 0);
   }
   if (mapped_indices) {
      draw_set_indexes(draw, NULL, 0, 0);
   }
   draw_set_mapped_so_targets(draw, 0, NULL);

   if (lp->gs && lp->gs->no_tokens) {
      /* we have attached stream output to the vs for rendering,
         now let's reset it */
      if (lp->vs) {
         draw_vs_reset_so(lp->vs);
      }
   }

   /*
    * TODO: Flush only when a user vertex/index buffer is present
    * (or even better, modify draw module to do this
    * internally when this condition is seen?)
    */
   draw_flush(draw);
}
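
In this newer interface the mappings carry an explicit size so the draw module can guard against out-of-bounds fetches; ~0 means "no known bound" and is used for user pointers. A sketch of mapping a single vertex buffer slot with its size, following the calls and field names used above.

/* Sketch: size-aware mapping with the newer draw interface.  A resource-
 * backed buffer reports width0 as the accessible size, a user pointer
 * passes ~0 ("no known bound"), and an unbound slot is cleared.  Field
 * names follow the llvmpipe code above.
 */
static void
map_one_vertex_buffer(struct draw_context *draw, unsigned slot,
                      const struct pipe_vertex_buffer *vb)
{
   if (vb->is_user_buffer) {
      draw_set_mapped_vertex_buffer(draw, slot, vb->buffer.user, ~0);
   } else if (vb->buffer.resource) {
      draw_set_mapped_vertex_buffer(draw, slot,
                                    llvmpipe_resource_data(vb->buffer.resource),
                                    vb->buffer.resource->width0);
   } else {
      draw_set_mapped_vertex_buffer(draw, slot, NULL, 0);
   }
}
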
Example #9
/**
 * Called by VBO to draw arrays when in selection or feedback mode and
 * to implement glRasterPos.
 * This is very much like the normal draw_vbo() function above.
 * Consider refactoring the shared code some day.
 * Might move this into the failover module some day.
 */
void
st_feedback_draw_vbo(GLcontext *ctx,
                     const struct gl_client_array **arrays,
                     const struct _mesa_prim *prims,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
		     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index)
{
   struct st_context *st = ctx->st;
   struct pipe_context *pipe = st->pipe;
   struct draw_context *draw = st->draw;
   const struct st_vertex_program *vp;
   const struct pipe_shader_state *vs;
   struct pipe_buffer *index_buffer_handle = 0;
   struct pipe_vertex_buffer vbuffers[PIPE_MAX_SHADER_INPUTS];
   struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
   GLuint attr, i;
   ubyte *mapped_constants;

   assert(draw);

   st_validate_state(ctx->st);

   if (!index_bounds_valid)
      vbo_get_minmax_index(ctx, prims, ib, &min_index, &max_index);

   /* must get these after state validation! */
   vp = ctx->st->vp;
   vs = &st->vp->state;

   if (!st->vp->draw_shader) {
      st->vp->draw_shader = draw_create_vertex_shader(draw, vs);
   }

   /*
    * Set up the draw module's state.
    *
    * We'd like to do this less frequently, but the normal state-update
    * code sends state updates to the pipe, not to our private draw module.
    */
   assert(draw);
   draw_set_viewport_state(draw, &st->state.viewport);
   draw_set_clip_state(draw, &st->state.clip);
   draw_set_rasterizer_state(draw, &st->state.rasterizer);
   draw_bind_vertex_shader(draw, st->vp->draw_shader);
   set_feedback_vertex_format(ctx);

   /* loop over TGSI shader inputs to determine vertex buffer
    * and attribute info
    */
   for (attr = 0; attr < vp->num_inputs; attr++) {
      const GLuint mesaAttr = vp->index_to_input[attr];
      struct gl_buffer_object *bufobj = arrays[mesaAttr]->BufferObj;
      void *map;

      if (bufobj && bufobj->Name) {
         /* Attribute data is in a VBO.
          * Recall that for VBOs, the gl_client_array->Ptr field is
          * really an offset from the start of the VBO, not a pointer.
          */
         struct st_buffer_object *stobj = st_buffer_object(bufobj);
         assert(stobj->buffer);

         vbuffers[attr].buffer = NULL;
         pipe_buffer_reference(&vbuffers[attr].buffer, stobj->buffer);
         vbuffers[attr].buffer_offset = pointer_to_offset(arrays[0]->Ptr);
         velements[attr].src_offset = arrays[mesaAttr]->Ptr - arrays[0]->Ptr;
      }
      else {
         /* attribute data is in user-space memory, not a VBO */
         uint bytes = (arrays[mesaAttr]->Size
                       * _mesa_sizeof_type(arrays[mesaAttr]->Type)
                       * (max_index + 1));

         /* wrap user data */
         vbuffers[attr].buffer
            = pipe_user_buffer_create(pipe->screen, (void *) arrays[mesaAttr]->Ptr,
                                      bytes);
         vbuffers[attr].buffer_offset = 0;
         velements[attr].src_offset = 0;
      }

      /* common-case setup */
      vbuffers[attr].stride = arrays[mesaAttr]->StrideB; /* in bytes */
      vbuffers[attr].max_index = max_index;
      velements[attr].vertex_buffer_index = attr;
      velements[attr].nr_components = arrays[mesaAttr]->Size;
      velements[attr].src_format = 
         st_pipe_vertex_format(arrays[mesaAttr]->Type,
                               arrays[mesaAttr]->Size,
                               arrays[mesaAttr]->Format,
                               arrays[mesaAttr]->Normalized);
      assert(velements[attr].src_format);

      /* tell draw about this attribute */
#if 0
      draw_set_vertex_buffer(draw, attr, &vbuffer[attr]);
#endif

      /* map the attrib buffer */
      map = pipe_buffer_map(pipe->screen, vbuffers[attr].buffer,
                            PIPE_BUFFER_USAGE_CPU_READ);
      draw_set_mapped_vertex_buffer(draw, attr, map);
   }

   draw_set_vertex_buffers(draw, vp->num_inputs, vbuffers);
   draw_set_vertex_elements(draw, vp->num_inputs, velements);

   if (ib) {
      struct gl_buffer_object *bufobj = ib->obj;
      unsigned indexSize;
      void *map;

      switch (ib->type) {
      case GL_UNSIGNED_INT:
         indexSize = 4;
         break;
      case GL_UNSIGNED_SHORT:
         indexSize = 2;
         break;
      default:
         assert(0);
	 return;
      }

      if (bufobj && bufobj->Name) {
         struct st_buffer_object *stobj = st_buffer_object(bufobj);

         index_buffer_handle = stobj->buffer;

         map = pipe_buffer_map(pipe->screen, index_buffer_handle,
                               PIPE_BUFFER_USAGE_CPU_READ);

         draw_set_mapped_element_buffer(draw, indexSize, map);
      }
      else {
         draw_set_mapped_element_buffer(draw, indexSize, (void *) ib->ptr);
      }
   }
   else {
      /* no index/element buffer */
      draw_set_mapped_element_buffer(draw, 0, NULL);
   }


   /* map constant buffers */
   mapped_constants = pipe_buffer_map(pipe->screen,
                                      st->state.constants[PIPE_SHADER_VERTEX].buffer,
                                      PIPE_BUFFER_USAGE_CPU_READ);
   draw_set_mapped_constant_buffer(st->draw, mapped_constants,
                                   st->state.constants[PIPE_SHADER_VERTEX].buffer->size);


   /* draw here */
   for (i = 0; i < nr_prims; i++) {
      draw_arrays(draw, prims[i].mode, prims[i].start, prims[i].count);
   }


   /* unmap constant buffers */
   pipe_buffer_unmap(pipe->screen, st->state.constants[PIPE_SHADER_VERTEX].buffer);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
      if (draw->pt.vertex_buffer[i].buffer) {
         pipe_buffer_unmap(pipe->screen, draw->pt.vertex_buffer[i].buffer);
         pipe_buffer_reference(&draw->pt.vertex_buffer[i].buffer, NULL);
         draw_set_mapped_vertex_buffer(draw, i, NULL);
      }
   }
   if (index_buffer_handle) {
      pipe_buffer_unmap(pipe->screen, index_buffer_handle);
      draw_set_mapped_element_buffer(draw, 0, NULL);
   }
}
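
The user-space (non-VBO) case relies on pipe_user_buffer_create() to wrap the application's array in a pipe_buffer so the rest of the path can bind and map it like any other buffer; the byte count is a conservative upper bound derived from the component count, the component size and the highest index that will be fetched. A minimal sketch of that wrapping with the same three-argument pipe_user_buffer_create() used above; the helper name and parameters are placeholders.

/* Placeholder helper: wrap a user-space vertex array so it can be bound
 * like a VBO.  size/type_size/max_index stand in for the array's
 * component count, the size of one component in bytes, and the highest
 * vertex index the draw will fetch.
 */
static struct pipe_buffer *
wrap_user_array(struct pipe_screen *screen, const void *ptr,
                unsigned size, unsigned type_size, unsigned max_index)
{
   unsigned bytes = size * type_size * (max_index + 1);

   return pipe_user_buffer_create(screen, (void *) ptr, bytes);
}
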
Example #10
/**
 * Called by VBO to draw arrays when in selection or feedback mode and
 * to implement glRasterPos.
 * This is very much like the normal draw_vbo() function above.
 * Consider refactoring the shared code some day.
 */
void
st_feedback_draw_vbo(struct gl_context *ctx,
                     const struct gl_client_array **arrays,
                     const struct _mesa_prim *prims,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
		     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index,
                     struct gl_transform_feedback_object *tfb_vertcount)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct draw_context *draw = st->draw;
   const struct st_vertex_program *vp;
   const struct pipe_shader_state *vs;
   struct pipe_vertex_buffer vbuffers[PIPE_MAX_SHADER_INPUTS];
   struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
   struct pipe_index_buffer ibuffer;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
   struct pipe_transfer *ib_transfer = NULL;
   GLuint attr, i;
   const GLubyte *low_addr = NULL;
   const void *mapped_indices = NULL;

   assert(draw);

   st_validate_state(st);

   if (!index_bounds_valid)
      vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);

   /* must get these after state validation! */
   vp = st->vp;
   vs = &st->vp_variant->tgsi;

   if (!st->vp_variant->draw_shader) {
      st->vp_variant->draw_shader = draw_create_vertex_shader(draw, vs);
   }

   /*
    * Set up the draw module's state.
    *
    * We'd like to do this less frequently, but the normal state-update
    * code sends state updates to the pipe, not to our private draw module.
    */
   assert(draw);
   draw_set_viewport_state(draw, &st->state.viewport);
   draw_set_clip_state(draw, &st->state.clip);
   draw_set_rasterizer_state(draw, &st->state.rasterizer, NULL);
   draw_bind_vertex_shader(draw, st->vp_variant->draw_shader);
   set_feedback_vertex_format(ctx);

   /* Find the lowest address of the arrays we're drawing */
   if (vp->num_inputs) {
      low_addr = arrays[vp->index_to_input[0]]->Ptr;

      for (attr = 1; attr < vp->num_inputs; attr++) {
         const GLubyte *start = arrays[vp->index_to_input[attr]]->Ptr;
         low_addr = MIN2(low_addr, start);
      }
   }

   /* loop over TGSI shader inputs to determine vertex buffer
    * and attribute info
    */
   for (attr = 0; attr < vp->num_inputs; attr++) {
      const GLuint mesaAttr = vp->index_to_input[attr];
      struct gl_buffer_object *bufobj = arrays[mesaAttr]->BufferObj;
      void *map;

      if (bufobj && bufobj->Name) {
         /* Attribute data is in a VBO.
          * Recall that for VBOs, the gl_client_array->Ptr field is
          * really an offset from the start of the VBO, not a pointer.
          */
         struct st_buffer_object *stobj = st_buffer_object(bufobj);
         assert(stobj->buffer);

         vbuffers[attr].buffer = NULL;
         pipe_resource_reference(&vbuffers[attr].buffer, stobj->buffer);
         vbuffers[attr].buffer_offset = pointer_to_offset(low_addr);
         velements[attr].src_offset = arrays[mesaAttr]->Ptr - low_addr;
      }
      else {
         /* attribute data is in user-space memory, not a VBO */
         uint bytes = (arrays[mesaAttr]->Size
                       * _mesa_sizeof_type(arrays[mesaAttr]->Type)
                       * (max_index + 1));

         /* wrap user data */
         vbuffers[attr].buffer
            = pipe_user_buffer_create(pipe->screen, (void *) arrays[mesaAttr]->Ptr,
                                      bytes,
				      PIPE_BIND_VERTEX_BUFFER);
         vbuffers[attr].buffer_offset = 0;
         velements[attr].src_offset = 0;
      }

      /* common-case setup */
      vbuffers[attr].stride = arrays[mesaAttr]->StrideB; /* in bytes */
      velements[attr].instance_divisor = 0;
      velements[attr].vertex_buffer_index = attr;
      velements[attr].src_format = 
         st_pipe_vertex_format(arrays[mesaAttr]->Type,
                               arrays[mesaAttr]->Size,
                               arrays[mesaAttr]->Format,
                               arrays[mesaAttr]->Normalized,
                               arrays[mesaAttr]->Integer);
      assert(velements[attr].src_format);

      /* tell draw about this attribute */
#if 0
      draw_set_vertex_buffer(draw, attr, &vbuffer[attr]);
#endif

      /* map the attrib buffer */
      map = pipe_buffer_map(pipe, vbuffers[attr].buffer,
                            PIPE_TRANSFER_READ,
			    &vb_transfer[attr]);
      draw_set_mapped_vertex_buffer(draw, attr, map);
   }

   draw_set_vertex_buffers(draw, vp->num_inputs, vbuffers);
   draw_set_vertex_elements(draw, vp->num_inputs, velements);

   memset(&ibuffer, 0, sizeof(ibuffer));
   if (ib) {
      struct gl_buffer_object *bufobj = ib->obj;

      ibuffer.index_size = vbo_sizeof_ib_type(ib->type);
      if (ibuffer.index_size == 0)
         goto out_unref_vertex;

      if (bufobj && bufobj->Name) {
         struct st_buffer_object *stobj = st_buffer_object(bufobj);

         pipe_resource_reference(&ibuffer.buffer, stobj->buffer);
         ibuffer.offset = pointer_to_offset(ib->ptr);

         mapped_indices = pipe_buffer_map(pipe, stobj->buffer,
                                          PIPE_TRANSFER_READ, &ib_transfer);
      }
      else {
         /* skip setting ibuffer.buffer as the draw module does not use it */
         mapped_indices = ib->ptr;
      }

      draw_set_index_buffer(draw, &ibuffer);
      draw_set_mapped_index_buffer(draw, mapped_indices);
   }

   /* set the constant buffer */
   draw_set_mapped_constant_buffer(st->draw, PIPE_SHADER_VERTEX, 0,
                                   st->state.constants[PIPE_SHADER_VERTEX].ptr,
                                   st->state.constants[PIPE_SHADER_VERTEX].size);


   /* draw here */
   for (i = 0; i < nr_prims; i++) {
      draw_arrays(draw, prims[i].mode, prims[i].start, prims[i].count);
   }


   /*
    * unmap vertex/index buffers
    */
   if (ib) {
      draw_set_mapped_index_buffer(draw, NULL);
      draw_set_index_buffer(draw, NULL);

      if (ib_transfer)
         pipe_buffer_unmap(pipe, ib_transfer);
      pipe_resource_reference(&ibuffer.buffer, NULL);
   }

 out_unref_vertex:
   for (attr = 0; attr < vp->num_inputs; attr++) {
      pipe_buffer_unmap(pipe, vb_transfer[attr]);
      draw_set_mapped_vertex_buffer(draw, attr, NULL);
      pipe_resource_reference(&vbuffers[attr].buffer, NULL);
   }
   draw_set_vertex_buffers(draw, 0, NULL);
}
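
Unlike the older version in Example #9, which measured every src_offset against arrays[0]->Ptr, this one first finds low_addr, the lowest array start, and expresses both the buffer_offset and each element's src_offset relative to it, so per-element offsets stay small and non-negative when several arrays interleave one VBO. An illustrative snippet with made-up numbers (stride 24, position at byte 0 and color at byte 12 of the same buffer):

/* Illustrative numbers only: two arrays interleaved in one VBO.
 * With low_addr equal to the position array's start:
 *   buffer_offset          = pointer_to_offset(low_addr)
 *   src_offset of position = 0
 *   src_offset of color    = 12
 */
static void
interleaved_offsets_example(struct pipe_vertex_buffer *vb,
                            struct pipe_vertex_element *ve,
                            const GLubyte *low_addr)
{
   vb[0].buffer_offset = pointer_to_offset(low_addr);
   vb[0].stride = 24;

   ve[0].src_offset = 0;                  /* position */
   ve[1].src_offset = 12;                 /* color    */
   ve[0].vertex_buffer_index = 0;
   ve[1].vertex_buffer_index = 0;
}
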
Example #11
void
nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct draw_context *draw = nv30->draw;
   struct pipe_transfer *transfer[PIPE_MAX_ATTRIBS] = {NULL};
   struct pipe_transfer *transferi = NULL;
   int i;

   nv30_render_validate(nv30);

   if (nv30->draw_dirty & NV30_NEW_VIEWPORT)
      draw_set_viewport_states(draw, 0, 1, &nv30->viewport);
   if (nv30->draw_dirty & NV30_NEW_RASTERIZER)
      draw_set_rasterizer_state(draw, &nv30->rast->pipe, NULL);
   if (nv30->draw_dirty & NV30_NEW_CLIP)
      draw_set_clip_state(draw, &nv30->clip);
   if (nv30->draw_dirty & NV30_NEW_ARRAYS) {
      draw_set_vertex_buffers(draw, 0, nv30->num_vtxbufs, nv30->vtxbuf);
      draw_set_vertex_elements(draw, nv30->vertex->num_elements, nv30->vertex->pipe);
   }
   if (nv30->draw_dirty & NV30_NEW_FRAGPROG) {
      struct nv30_fragprog *fp = nv30->fragprog.program;
      if (!fp->draw)
         fp->draw = draw_create_fragment_shader(draw, &fp->pipe);
      draw_bind_fragment_shader(draw, fp->draw);
   }
   if (nv30->draw_dirty & NV30_NEW_VERTPROG) {
      struct nv30_vertprog *vp = nv30->vertprog.program;
      if (!vp->draw)
         vp->draw = draw_create_vertex_shader(draw, &vp->pipe);
      draw_bind_vertex_shader(draw, vp->draw);
   }
   if (nv30->draw_dirty & NV30_NEW_VERTCONST) {
      if (nv30->vertprog.constbuf) {
         void *map = nv04_resource(nv30->vertprog.constbuf)->data;
         draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0,
                                         map, nv30->vertprog.constbuf_nr * 16);
      } else {
         draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0, NULL, 0);
      }
   }

   for (i = 0; i < nv30->num_vtxbufs; i++) {
      const void *map = nv30->vtxbuf[i].user_buffer;
      if (!map) {
         if (nv30->vtxbuf[i].buffer)
            map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer,
                                  PIPE_TRANSFER_UNSYNCHRONIZED |
                                  PIPE_TRANSFER_READ, &transfer[i]);
      }
      draw_set_mapped_vertex_buffer(draw, i, map, ~0);
   }

   if (info->indexed) {
      const void *map = nv30->idxbuf.user_buffer;
      if (!map)
         map = pipe_buffer_map(pipe, nv30->idxbuf.buffer,
                               PIPE_TRANSFER_UNSYNCHRONIZED |
                               PIPE_TRANSFER_READ, &transferi);
      draw_set_indexes(draw,
                       (ubyte *) map + nv30->idxbuf.offset,
                       nv30->idxbuf.index_size, ~0);
   } else {
      draw_set_indexes(draw, NULL, 0, 0);
   }

   draw_vbo(draw, info);
   draw_flush(draw);

   if (info->indexed && transferi)
      pipe_buffer_unmap(pipe, transferi);
   for (i = 0; i < nv30->num_vtxbufs; i++)
      if (transfer[i])
         pipe_buffer_unmap(pipe, transfer[i]);

   nv30->draw_dirty = 0;
   nv30_state_release(nv30);
}
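
One idiom worth isolating from this example: the draw module's shader variants are created lazily the first time the software path needs them and cached on the program object, keyed off the driver's dirty flags. A hedged sketch with an illustrative program struct (the nv30 types above are similar but not reproduced exactly):

/* Illustrative program struct: the draw-side variant is created on first
 * use and cached alongside the gallium shader state.
 */
struct example_vertprog {
   struct pipe_shader_state pipe;     /* TGSI tokens, stream output, ... */
   struct draw_vertex_shader *draw;   /* lazily created draw variant */
};

static void
bind_draw_vs(struct draw_context *draw, struct example_vertprog *vp)
{
   if (!vp->draw)
      vp->draw = draw_create_vertex_shader(draw, &vp->pipe);
   draw_bind_vertex_shader(draw, vp->draw);
}
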
Example #12
static boolean
i915_draw_range_elements(struct pipe_context *pipe,
                         struct pipe_buffer *indexBuffer,
                         unsigned indexSize,
                         unsigned min_index,
                         unsigned max_index,
                         unsigned prim, unsigned start, unsigned count)
{
   struct i915_context *i915 = i915_context(pipe);
   struct draw_context *draw = i915->draw;
   unsigned i;

   if (i915->dirty)
      i915_update_derived(i915);

   /*
    * Map vertex buffers
    */
   for (i = 0; i < i915->num_vertex_buffers; i++) {
      void *buf = pipe_buffer_map(pipe->screen, i915->vertex_buffer[i].buffer,
                                  PIPE_BUFFER_USAGE_CPU_READ);
      draw_set_mapped_vertex_buffer(draw, i, buf);
   }

   /*
    * Map index buffer, if present
    */
   if (indexBuffer) {
      void *mapped_indexes = pipe_buffer_map(pipe->screen, indexBuffer,
                                             PIPE_BUFFER_USAGE_CPU_READ);
      draw_set_mapped_element_buffer_range(draw, indexSize,
                                           min_index,
                                           max_index,
                                           mapped_indexes);
   } else {
      draw_set_mapped_element_buffer(draw, 0, NULL);
   }


   draw_set_mapped_constant_buffer(draw,
                                   i915->current.constants[PIPE_SHADER_VERTEX],
                                   (i915->current.num_user_constants[PIPE_SHADER_VERTEX] * 
                                      4 * sizeof(float)));

   /*
    * Do the drawing
    */
   draw_arrays(i915->draw, prim, start, count);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < i915->num_vertex_buffers; i++) {
      pipe_buffer_unmap(pipe->screen, i915->vertex_buffer[i].buffer);
      draw_set_mapped_vertex_buffer(draw, i, NULL);
   }

   if (indexBuffer) {
      pipe_buffer_unmap(pipe->screen, indexBuffer);
      draw_set_mapped_element_buffer_range(draw, 0, start, start + count - 1, NULL);
   }

   return TRUE;
}
Example #13
static void
i915_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
   struct i915_context *i915 = i915_context(pipe);
   struct draw_context *draw = i915->draw;
   const void *mapped_indices = NULL;
   unsigned i;

   /*
    * Ack VS constants here; it helps ipers a lot.
    */
   i915->dirty &= ~I915_NEW_VS_CONSTANTS;

   if (i915->dirty)
      i915_update_derived(i915);

   /*
    * Map vertex buffers
    */
   for (i = 0; i < i915->nr_vertex_buffers; i++) {
      const void *buf = i915->vertex_buffers[i].user_buffer;
      if (!buf)
            buf = i915_buffer(i915->vertex_buffers[i].buffer)->data;
      draw_set_mapped_vertex_buffer(draw, i, buf, ~0);
   }

   /*
    * Map index buffer, if present
    */
   if (info->indexed) {
      mapped_indices = i915->index_buffer.user_buffer;
      if (!mapped_indices)
         mapped_indices = i915_buffer(i915->index_buffer.buffer)->data;
      draw_set_indexes(draw,
                       (ubyte *) mapped_indices + i915->index_buffer.offset,
                       i915->index_buffer.index_size, ~0);
   }

   if (i915->constants[PIPE_SHADER_VERTEX])
      draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0,
                                      i915_buffer(i915->constants[PIPE_SHADER_VERTEX])->data,
                                      (i915->current.num_user_constants[PIPE_SHADER_VERTEX] * 
                                      4 * sizeof(float)));
   else
      draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0, NULL, 0);

   if (i915->num_vertex_sampler_views > 0)
      i915_prepare_vertex_sampling(i915);

   /*
    * Do the drawing
    */
   draw_vbo(i915->draw, info);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < i915->nr_vertex_buffers; i++) {
      draw_set_mapped_vertex_buffer(i915->draw, i, NULL, 0);
   }
   if (mapped_indices)
      draw_set_indexes(draw, NULL, 0, 0);

   if (i915->num_vertex_sampler_views > 0)
      i915_cleanup_vertex_sampling(i915);

   /*
    * Instead of flushing on every state change, we flush once here
    * when we fire the vbo.
    */
   draw_flush(i915->draw);
}
Example #14
/**
 * Draw vertex arrays, with optional indexing.
 * Basically, map the vertex buffers (and drawing surfaces), then hand off
 * the drawing to the 'draw' module.
 *
 * XXX should the element buffer be specified/bound with a separate function?
 */
static boolean
cell_draw_range_elements(struct pipe_context *pipe,
                         struct pipe_buffer *indexBuffer,
                         unsigned indexSize,
                         unsigned min_index,
                         unsigned max_index,
                         unsigned mode, unsigned start, unsigned count)
{
   struct cell_context *sp = cell_context(pipe);
   struct draw_context *draw = sp->draw;
   unsigned i;

   if (sp->dirty)
      cell_update_derived( sp );

#if 0
   cell_map_surfaces(sp);
#endif
   cell_map_constant_buffers(sp);

   /*
    * Map vertex buffers
    */
   for (i = 0; i < sp->num_vertex_buffers; i++) {
      void *buf = pipe_buffer_map(pipe->screen,
                                  sp->vertex_buffer[i].buffer,
                                  PIPE_BUFFER_USAGE_CPU_READ);
      cell_flush_buffer_range(sp, buf, sp->vertex_buffer[i].buffer->size);
      draw_set_mapped_vertex_buffer(draw, i, buf);
   }
   /* Map index buffer, if present */
   if (indexBuffer) {
      void *mapped_indexes = pipe_buffer_map(pipe->screen,
                                             indexBuffer,
                                             PIPE_BUFFER_USAGE_CPU_READ);
      draw_set_mapped_element_buffer(draw, indexSize, mapped_indexes);
   }
   else {
      /* no index/element buffer */
      draw_set_mapped_element_buffer(draw, 0, NULL);
   }


   /* draw! */
   draw_arrays(draw, mode, start, count);

   /*
    * unmap vertex/index buffers - will cause draw module to flush
    */
   for (i = 0; i < sp->num_vertex_buffers; i++) {
      draw_set_mapped_vertex_buffer(draw, i, NULL);
      pipe_buffer_unmap(pipe->screen, sp->vertex_buffer[i].buffer);
   }
   if (indexBuffer) {
      draw_set_mapped_element_buffer(draw, 0, NULL);
      pipe_buffer_unmap(pipe->screen, indexBuffer);
   }

   /* Note: leave drawing surfaces mapped */
   cell_unmap_constant_buffers(sp);

   return TRUE;
}
Example #15
enum pipe_error
svga_swtnl_draw_range_elements(struct svga_context *svga,
                               struct pipe_buffer *indexBuffer,
                               unsigned indexSize,
                               unsigned min_index,
                               unsigned max_index,
                               unsigned prim, unsigned start, unsigned count)
{
    struct draw_context *draw = svga->swtnl.draw;
    unsigned i;
    const void *map;
    enum pipe_error ret;

    assert(!svga->dirty);
    assert(svga->state.sw.need_swtnl);
    assert(draw);

    ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW);
    if (ret) {
        svga_context_flush(svga, NULL);
        ret = svga_update_state(svga, SVGA_STATE_SWTNL_DRAW);
        svga->swtnl.new_vbuf = TRUE;
        assert(ret == PIPE_OK);
    }

    /*
     * Map vertex buffers
     */
    for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
        map = pipe_buffer_map(svga->pipe.screen,
                              svga->curr.vb[i].buffer,
                              PIPE_BUFFER_USAGE_CPU_READ);

        draw_set_mapped_vertex_buffer(draw, i, map);
    }

    /* Map index buffer, if present */
    if (indexBuffer) {
        map = pipe_buffer_map(svga->pipe.screen, indexBuffer,
                              PIPE_BUFFER_USAGE_CPU_READ);

        draw_set_mapped_element_buffer_range(draw,
                                             indexSize,
                                             min_index,
                                             max_index,
                                             map);
    }

    if (svga->curr.cb[PIPE_SHADER_VERTEX]) {
        map = pipe_buffer_map(svga->pipe.screen,
                              svga->curr.cb[PIPE_SHADER_VERTEX],
                              PIPE_BUFFER_USAGE_CPU_READ);
        assert(map);
        draw_set_mapped_constant_buffer(
            draw, PIPE_SHADER_VERTEX, 0,
            map,
            svga->curr.cb[PIPE_SHADER_VERTEX]->size);
    }

    draw_arrays(svga->swtnl.draw, prim, start, count);

    draw_flush(svga->swtnl.draw);

    /* Ensure the draw module didn't touch this */
    assert(i == svga->curr.num_vertex_buffers);

    /*
     * unmap vertex/index buffers
     */
    for (i = 0; i < svga->curr.num_vertex_buffers; i++) {
        pipe_buffer_unmap(svga->pipe.screen, svga->curr.vb[i].buffer);
        draw_set_mapped_vertex_buffer(draw, i, NULL);
    }

    if (indexBuffer) {
        pipe_buffer_unmap(svga->pipe.screen, indexBuffer);
        draw_set_mapped_element_buffer(draw, 0, NULL);
    }

    if (svga->curr.cb[PIPE_SHADER_VERTEX]) {
        pipe_buffer_unmap(svga->pipe.screen,
                          svga->curr.cb[PIPE_SHADER_VERTEX]);
    }

    return ret;
}
Example #16
/* SW TCL elements, using Draw. */
static void r300_swtcl_draw_vbo(struct pipe_context* pipe,
                                const struct pipe_draw_info *info)
{
    struct r300_context* r300 = r300_context(pipe);
    struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
    struct pipe_transfer *ib_transfer = NULL;
    unsigned count = info->count;
    int i;
    void *indices = NULL;
    boolean indexed = info->indexed && r300->index_buffer.buffer;

    if (r300->skip_rendering) {
        return;
    }

    if (!u_trim_pipe_prim(info->mode, &count)) {
        return;
    }

    r300_update_derived_state(r300);

    r300_reserve_cs_dwords(r300,
            PREP_FIRST_DRAW | PREP_EMIT_AOS_SWTCL |
            (indexed ? PREP_INDEXED : 0),
            indexed ? 256 : 6);

    for (i = 0; i < r300->vertex_buffer_count; i++) {
        if (r300->vertex_buffer[i].buffer) {
            void *buf = pipe_buffer_map(pipe,
                                  r300->vertex_buffer[i].buffer,
                                  PIPE_TRANSFER_READ,
                                  &vb_transfer[i]);
            draw_set_mapped_vertex_buffer(r300->draw, i, buf);
        }
    }

    if (indexed) {
        indices = pipe_buffer_map(pipe, r300->index_buffer.buffer,
                                  PIPE_TRANSFER_READ, &ib_transfer);
    }

    draw_set_mapped_index_buffer(r300->draw, indices);

    r300->draw_vbo_locked = TRUE;
    r300->draw_first_emitted = FALSE;
    draw_vbo(r300->draw, info);
    draw_flush(r300->draw);
    r300->draw_vbo_locked = FALSE;

    for (i = 0; i < r300->vertex_buffer_count; i++) {
        if (r300->vertex_buffer[i].buffer) {
            pipe_buffer_unmap(pipe, r300->vertex_buffer[i].buffer,
                              vb_transfer[i]);
            draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
        }
    }

    if (indexed) {
        pipe_buffer_unmap(pipe, r300->index_buffer.buffer, ib_transfer);
        draw_set_mapped_index_buffer(r300->draw, NULL);
    }
}
Example #17
/**
 * Draw vertex arrays, with optional indexing.
 * Basically, map the vertex buffers (and drawing surfaces), then hand off
 * the drawing to the 'draw' module.
 */
boolean
llvmpipe_draw_range_elements(struct pipe_context *pipe,
                             struct pipe_buffer *indexBuffer,
                             unsigned indexSize,
                             unsigned min_index,
                             unsigned max_index,
                             unsigned mode, unsigned start, unsigned count)
{
   struct llvmpipe_context *lp = llvmpipe_context(pipe);
   struct draw_context *draw = lp->draw;
   unsigned i;

   lp->reduced_api_prim = u_reduced_prim(mode);

   if (lp->dirty)
      llvmpipe_update_derived( lp );

   llvmpipe_map_transfers(lp);

   /*
    * Map vertex buffers
    */
   for (i = 0; i < lp->num_vertex_buffers; i++) {
      void *buf = llvmpipe_buffer(lp->vertex_buffer[i].buffer)->data;
      draw_set_mapped_vertex_buffer(draw, i, buf);
   }

   /* Map index buffer, if present */
   if (indexBuffer) {
      void *mapped_indexes = llvmpipe_buffer(indexBuffer)->data;
      draw_set_mapped_element_buffer_range(draw, indexSize,
                                           min_index,
                                           max_index,
                                           mapped_indexes);
   }
   else {
      /* no index/element buffer */
      draw_set_mapped_element_buffer_range(draw, 0, start,
                                           start + count - 1, NULL);
   }

   /* draw! */
   draw_arrays(draw, mode, start, count);

   /*
    * unmap vertex/index buffers
    */
   for (i = 0; i < lp->num_vertex_buffers; i++) {
      draw_set_mapped_vertex_buffer(draw, i, NULL);
   }
   if (indexBuffer) {
      draw_set_mapped_element_buffer(draw, 0, NULL);
   }

   /*
    * TODO: Flush only when a user vertex/index buffer is present
    * (or even better, modify draw module to do this
    * internally when this condition is seen?)
    */
   draw_flush(draw);

   /* Note: leave drawing surfaces mapped */

   lp->dirty_render_cache = TRUE;
   
   return TRUE;
}