Example 1
/**
 * Examine vertex arrays to update the gl_array_object::_MaxElement field.
 */
void
_mesa_update_array_object_max_element(struct gl_context *ctx,
                                      struct gl_array_object *arrayObj)
{
   GLbitfield64 enabled = arrayObj->_Enabled;
   GLuint min = ~0u;

   while (enabled) {
      GLint attrib = _mesa_ffsll(enabled) - 1;
      enabled &= ~BITFIELD64_BIT(attrib);
      min = update_min(min, &arrayObj->VertexAttrib[attrib]);
   }

   /* _MaxElement is one past the last legal array element */
   arrayObj->_MaxElement = min;
}
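The loop above is a standard lowest-set-bit walk over a 64-bit enable mask. As a minimal standalone sketch of the same pattern, here is the iteration with glibc's ffsll() standing in for Mesa's _mesa_ffsll() wrapper (the mask value is made up for illustration):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <strings.h>    /* ffsll() */

int main(void)
{
   /* Made-up mask with attributes 0, 3 and 10 enabled. */
   uint64_t enabled = (1ULL << 0) | (1ULL << 3) | (1ULL << 10);

   /* Same pattern as the loop above: find the lowest set bit,
    * clear it, repeat until the mask is empty.
    */
   while (enabled) {
      int attrib = ffsll((long long)enabled) - 1;
      enabled &= ~(1ULL << attrib);
      printf("attribute %d is enabled\n", attrib);
   }
   return 0;
}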
Example 2
void brw_emit_vertices( struct brw_context *brw,
                        GLuint min_index,
                        GLuint max_index )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLuint tmp = brw->vs.prog_data->inputs_read;
   struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
   GLuint i;
   GLuint nr_enabled = 0;

   /* Accumulate the list of enabled arrays. */
   while (tmp) {
      i = _mesa_ffsll(tmp)-1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      tmp &= ~(1<<i);
      enabled[nr_enabled++] = input;
   }


   /* Now emit VB and VEP state packets.
    *
    * This still defines a hardware VB for each input, even if they
    * are interleaved or from the same VBO.  TBD if this makes a
    * performance difference.
    */
   BEGIN_BATCH(1 + nr_enabled * 4, IGNORE_CLIPRECTS);
   OUT_BATCH((CMD_VERTEX_BUFFER << 16) |
	     ((1 + nr_enabled * 4) - 2));

   for (i = 0; i < nr_enabled; i++) {
      struct brw_vertex_element *input = enabled[i];

      OUT_BATCH((i << BRW_VB0_INDEX_SHIFT) |
		BRW_VB0_ACCESS_VERTEXDATA |
		(input->stride << BRW_VB0_PITCH_SHIFT));
      OUT_RELOC(input->bo,
		DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
		input->offset);
      OUT_BATCH(max_index);
      OUT_BATCH(0); /* Instance data step rate */

      /* Unreference the buffer so it can get freed, now that we won't
       * touch it any more.
       */
      dri_bo_unreference(input->bo);
      input->bo = NULL;
   }
   ADVANCE_BATCH();

   BEGIN_BATCH(1 + nr_enabled * 2, IGNORE_CLIPRECTS);
   OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | ((1 + nr_enabled * 2) - 2));
   for (i = 0; i < nr_enabled; i++) {
      struct brw_vertex_element *input = enabled[i];
      uint32_t format = get_surface_type(input->glarray->Type,
					 input->glarray->Size,
					 input->glarray->Normalized);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      /* Fill in any components missing from the client array with 0,
       * and a missing w with 1; each case intentionally falls through.
       */
      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;	/* fall through */
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;	/* fall through */
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;	/* fall through */
      case 3: comp3 = BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      OUT_BATCH((i << BRW_VE0_INDEX_SHIFT) |
		BRW_VE0_VALID |
		(format << BRW_VE0_FORMAT_SHIFT) |
		(0 << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
		(comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
		(comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
		(comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
		((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }
   ADVANCE_BATCH();
}
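Both packets above use the hardware convention that a 3D command's length field encodes the packet's total DWord count minus two: one header DWord plus four DWords per vertex buffer (or two per vertex element). A quick standalone check of that arithmetic, assuming three enabled inputs:

#include <assert.h>

int main(void)
{
   unsigned nr_enabled = 3;

   /* CMD_VERTEX_BUFFER: 1 header DWord + 4 DWords per buffer. */
   unsigned vb_dwords = 1 + nr_enabled * 4;   /* 13 DWords total */
   unsigned vb_length = vb_dwords - 2;        /* 11, as emitted above */

   /* CMD_VERTEX_ELEMENT: 1 header DWord + 2 DWords per element. */
   unsigned ve_dwords = 1 + nr_enabled * 2;   /* 7 DWords total */
   unsigned ve_length = ve_dwords - 2;        /* 5 */

   assert(vb_length == 11 && ve_length == 5);
   return 0;
}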
Example 3
GLboolean brw_upload_vertices( struct brw_context *brw,
			       GLuint min_index,
			       GLuint max_index )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLuint tmp = brw->vs.prog_data->inputs_read; 
   struct brw_vertex_element_packet vep;
   struct brw_array_state vbp;
   GLuint i;
   const void *ptr = NULL;
   GLuint interleave = 0;

   struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
   GLuint nr_enabled = 0;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;
   

   memset(&vbp, 0, sizeof(vbp));
   memset(&vep, 0, sizeof(vep));

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      _mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);
   
   while (tmp) {
      GLuint i = _mesa_ffsll(tmp)-1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      tmp &= ~(1<<i);
      enabled[nr_enabled++] = input;

      input->index = i;
      input->element_size = get_size(input->glarray->Type) * input->glarray->Size;
      input->count = input->glarray->StrideB ? max_index + 1 - min_index : 1;

      if (!input->glarray->BufferObj->Name) {
	 if (i == 0) {
	    /* Position array not properly enabled:
	     */
	    if (input->glarray->StrideB == 0)
	       return GL_FALSE;

	    interleave = input->glarray->StrideB;
	    ptr = input->glarray->Ptr;
	 }
	 else if (interleave != input->glarray->StrideB ||
		  (const char *)input->glarray->Ptr - (const char *)ptr < 0 ||
		  (const char *)input->glarray->Ptr - (const char *)ptr > interleave) {
	    interleave = 0;
	 }

	 upload[nr_uploads++] = input;
	 
	 /* We rebase drawing to start at element zero only when
	  * varyings are not in vbos, which means we can end up
	  * uploading non-varying arrays (stride != 0) when min_index
	  * is zero.  This doesn't matter as the amount to upload is
	  * the same for these arrays whether the draw call is rebased
	  * or not - we just have to upload the one element.
	  */
	 assert(min_index == 0 || input->glarray->StrideB == 0);
      }
   }

   /* Upload interleaved arrays if all uploads are interleaved
    */
   if (nr_uploads > 1 && 
       interleave && 
       interleave <= 256) {
      struct brw_vertex_element *input0 = upload[0];

      input0->glarray = copy_array_to_vbo_array(brw, 0,
						input0->glarray, 
						interleave,
						input0->count);

      for (i = 1; i < nr_uploads; i++) {
	 upload[i]->glarray = interleaved_vbo_array(brw,
						    i,
						    input0->glarray,
						    upload[i]->glarray,
						    ptr);
      }
   }
   else {
      for (i = 0; i < nr_uploads; i++) {
	 struct brw_vertex_element *input = upload[i];

	 input->glarray = copy_array_to_vbo_array(brw, i, 
						  input->glarray,
						  input->element_size,
						  input->count);

      }
   }

   /* XXX: In the rare cases where this happens we fall back all
    * the way to software rasterization, although a tnl fallback
    * would be sufficient.  I don't know of *any* real world
    * cases with > 17 vertex attributes enabled, so it probably
    * isn't an issue at this point.
    */
   if (nr_enabled >= BRW_VEP_MAX)
      return GL_FALSE;

   /* This still defines a hardware VB for each input, even if they
    * are interleaved or from the same VBO.  TBD if this makes a
    * performance difference.
    */
   for (i = 0; i < nr_enabled; i++) {
      struct brw_vertex_element *input = enabled[i];

      input->vep = &vep.ve[i];
      input->vep->ve0.src_format = get_surface_type(input->glarray->Type, 
						    input->glarray->Size,
						    input->glarray->Normalized);
      input->vep->ve0.valid = 1;
      input->vep->ve1.dst_offset = (i) * 4;
      input->vep->ve1.vfcomponent3 = BRW_VFCOMPONENT_STORE_SRC;
      input->vep->ve1.vfcomponent2 = BRW_VFCOMPONENT_STORE_SRC;
      input->vep->ve1.vfcomponent1 = BRW_VFCOMPONENT_STORE_SRC;
      input->vep->ve1.vfcomponent0 = BRW_VFCOMPONENT_STORE_SRC;

      /* Fill in any components missing from the client array with 0,
       * and a missing w with 1; each case intentionally falls through.
       */
      switch (input->glarray->Size) {
      case 0: input->vep->ve1.vfcomponent0 = BRW_VFCOMPONENT_STORE_0;	/* fall through */
      case 1: input->vep->ve1.vfcomponent1 = BRW_VFCOMPONENT_STORE_0;	/* fall through */
      case 2: input->vep->ve1.vfcomponent2 = BRW_VFCOMPONENT_STORE_0;	/* fall through */
      case 3: input->vep->ve1.vfcomponent3 = BRW_VFCOMPONENT_STORE_1_FLT;
         break;
      }

      input->vep->ve0.vertex_buffer_index = i;
      input->vep->ve0.src_offset = 0;

      vbp.vb[i].vb0.bits.pitch = input->glarray->StrideB;
      vbp.vb[i].vb0.bits.pad = 0;
      vbp.vb[i].vb0.bits.access_type = BRW_VERTEXBUFFER_ACCESS_VERTEXDATA;
      vbp.vb[i].vb0.bits.vb_index = i;
      vbp.vb[i].offset = (GLuint)input->glarray->Ptr;
      vbp.vb[i].buffer = array_buffer(input->glarray);
      vbp.vb[i].max_index = max_index;
   }



   /* Now emit VB and VEP state packets:
    */
   vbp.header.bits.length = (1 + nr_enabled * 4) - 2;
   vbp.header.bits.opcode = CMD_VERTEX_BUFFER;

   BEGIN_BATCH(vbp.header.bits.length+2, 0);
   OUT_BATCH( vbp.header.dword );
   
   for (i = 0; i < nr_enabled; i++) {
      OUT_BATCH( vbp.vb[i].vb0.dword );
      OUT_BATCH( bmBufferOffset(&brw->intel, vbp.vb[i].buffer) + vbp.vb[i].offset);
      OUT_BATCH( vbp.vb[i].max_index );
      OUT_BATCH( vbp.vb[i].instance_data_step_rate );
   }
   ADVANCE_BATCH();

   vep.header.length = (1 + nr_enabled * sizeof(vep.ve[0])/4) - 2;
   vep.header.opcode = CMD_VERTEX_ELEMENT;
   brw_cached_batch_struct(brw, &vep, 4 + nr_enabled * sizeof(vep.ve[0]));

   return GL_TRUE;
}
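One sizing rule shared by this function and brw_prepare_vertices below: input->count covers the whole referenced index range for a real array, but collapses to a single element when StrideB is zero (a constant attribute). A standalone sketch of that rule with made-up index values:

#include <assert.h>

/* Elements to upload for one client array, mirroring the
 * input->count computation above.
 */
static unsigned
upload_count(unsigned stride_b, unsigned min_index, unsigned max_index)
{
   return stride_b ? max_index + 1 - min_index : 1;
}

int main(void)
{
   assert(upload_count(12, 0, 99) == 100); /* real array: whole range */
   assert(upload_count(0, 0, 99) == 1);    /* constant attribute: one element */
   return 0;
}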
Example 4
int brw_prepare_vertices( struct brw_context *brw,
			       GLuint min_index,
			       GLuint max_index )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLuint tmp = brw->vs.prog_data->inputs_read; 
   GLuint i;
   const unsigned char *ptr = NULL;
   GLuint interleave = 0;
   int ret = 0;

   struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
   GLuint nr_enabled = 0;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      _mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   while (tmp) {
      GLuint i = _mesa_ffsll(tmp)-1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      tmp &= ~(1<<i);
      enabled[nr_enabled++] = input;
   }

   /* XXX: In the rare cases where this happens we fall back all
    * the way to software rasterization, although a tnl fallback
    * would be sufficient.  I don't know of *any* real world
    * cases with > 17 vertex attributes enabled, so it probably
    * isn't an issue at this point.
    */
   if (nr_enabled >= BRW_VEP_MAX)
      return -1;

   for (i = 0; i < nr_enabled; i++) {
      struct brw_vertex_element *input = enabled[i];

      input->element_size = get_size(input->glarray->Type) * input->glarray->Size;
      input->count = input->glarray->StrideB ? max_index + 1 - min_index : 1;

      if (input->glarray->BufferObj->Name != 0) {
	 struct intel_buffer_object *intel_buffer =
	    intel_buffer_object(input->glarray->BufferObj);

	 /* Named buffer object: Just reference its contents directly. */
	 input->bo = intel_bufferobj_buffer(intel, intel_buffer,
					    INTEL_READ);
	 dri_bo_reference(input->bo);
	 input->offset = (unsigned long)input->glarray->Ptr;
	 input->stride = input->glarray->StrideB;

	 ret |= dri_bufmgr_check_aperture_space(input->bo);
      } else {
	 /* Queue the buffer object up to be uploaded in the next pass,
	  * when we've decided if we're doing interleaved or not.
	  */
	 if (i == 0) {
	    /* Position array not properly enabled:
	     */
	    if (input->glarray->StrideB == 0)
	      return -1;

	    interleave = input->glarray->StrideB;
	    ptr = input->glarray->Ptr;
	 }
	 else if (interleave != input->glarray->StrideB ||
		  (const unsigned char *)input->glarray->Ptr - ptr < 0 ||
		  (const unsigned char *)input->glarray->Ptr - ptr > interleave)
	 {
	    interleave = 0;
	 }

	 upload[nr_uploads++] = input;
	 
	 /* We rebase drawing to start at element zero only when
	  * varyings are not in vbos, which means we can end up
	  * uploading non-varying arrays (stride != 0) when min_index
	  * is zero.  This doesn't matter as the amount to upload is
	  * the same for these arrays whether the draw call is rebased
	  * or not - we just have to upload the one element.
	  */
	 assert(min_index == 0 || input->glarray->StrideB == 0);
      }
   }

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1 && interleave && interleave <= 256) {
      /* All uploads are interleaved, so upload the arrays together as
       * interleaved.  First, upload the contents and set up upload[0].
       */
      copy_array_to_vbo_array(brw, upload[0], interleave);

      ret |= dri_bufmgr_check_aperture_space(upload[0]->bo);
      for (i = 1; i < nr_uploads; i++) {
	 /* Then, just point upload[i] at upload[0]'s buffer. */
	 upload[i]->stride = interleave;
	 upload[i]->offset = upload[0]->offset +
	    ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
	 upload[i]->bo = upload[0]->bo;
	 dri_bo_reference(upload[i]->bo);
      }
   }
   else {
      /* Upload non-interleaved arrays */
      for (i = 0; i < nr_uploads; i++) {
          copy_array_to_vbo_array(brw, upload[i], upload[i]->element_size);
          if (upload[i]->bo) {
              ret |= dri_bufmgr_check_aperture_space(upload[i]->bo);
          }
      }
   }


   if (ret)
      return 1;

   return 0;
}
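The interleave test used by both upload paths takes the first client array's stride as the candidate record size and accepts the remaining arrays only if they share that stride and start within one record of the first array's pointer. Below is a standalone sketch of just that decision; struct client_array is a hypothetical stand-in for the gl_client_array fields involved, not the Mesa type:

#include <stddef.h>

/* Simplified stand-in for the gl_client_array fields used above. */
struct client_array {
   const unsigned char *ptr;   /* client-memory start of the array */
   unsigned stride;            /* byte stride between elements */
};

/* Returns the common interleave stride, or 0 if the arrays cannot be
 * treated as one interleaved record.  This answers only the
 * interleaved-or-not question; the driver loop above also keeps
 * scanning to collect the upload list.
 */
static unsigned
detect_interleave(const struct client_array *arrays, int count)
{
   const unsigned char *base = arrays[0].ptr;
   unsigned interleave = arrays[0].stride;

   for (int i = 1; i < count; i++) {
      ptrdiff_t delta = arrays[i].ptr - base;
      if (arrays[i].stride != interleave ||
          delta < 0 || (unsigned)delta > interleave)
         return 0;
   }
   return interleave;
}

int main(void)
{
   unsigned char buf[64];
   struct client_array arr[2] = {
      { buf + 0,  24 },   /* e.g. position at the start of each record */
      { buf + 12, 24 },   /* e.g. normal 12 bytes into the same record */
   };
   /* Same stride, second array starts within one record: interleaved. */
   return detect_interleave(arr, 2) == 24 ? 0 : 1;
}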