Example #1
static void prepare_depthbuffer(struct brw_context *brw)
{
    struct intel_context *intel = &brw->intel;
    struct gl_context *ctx = &intel->ctx;
    struct gl_framebuffer *fb = ctx->DrawBuffer;
    struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
    struct intel_renderbuffer *srb = intel_get_renderbuffer(fb, BUFFER_STENCIL);

    if (drb)
        brw_add_validated_bo(brw, drb->region->buffer);
    if (srb)
        brw_add_validated_bo(brw, srb->region->buffer);
}
Example #2
static int prepare_depthbuffer(struct brw_context *brw)
{
   struct pipe_surface *zsbuf = brw->curr.fb.zsbuf;

   if (zsbuf)
      brw_add_validated_bo(brw, brw_surface(zsbuf)->bo);

   return 0;
}
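Both depth-buffer variants, classic Mesa (Example #1) and Gallium (Example #2), funnel every buffer object the next batch will touch through brw_add_validated_bo(). The helper itself never appears in these examples; below is a minimal sketch of what it plausibly does, assuming brw->state carries a fixed-size validated_bos array (the field names are illustrative, not taken from these listings).

static void brw_add_validated_bo(struct brw_context *brw, drm_intel_bo *bo)
{
   /* Assumed layout: a fixed-size array of bos pending validation. */
   assert(brw->state.validated_bo_count <
          ARRAY_SIZE(brw->state.validated_bos));

   if (bo != NULL) {
      /* Hold a reference so the bo survives until batch submission. */
      drm_intel_bo_reference(bo);
      brw->state.validated_bos[brw->state.validated_bo_count++] = bo;
   }
}

Collecting every bo up front lets the driver check that the whole working set fits in the GPU aperture before any state is emitted.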
Example #3
/***********************************************************************
 * Emit all state:
 */
enum pipe_error brw_validate_state( struct brw_context *brw )
{
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   int ret;

   brw_clear_validated_bos(brw);
   brw_add_validated_bo(brw, brw->batch->buf);

   if (brw->flags.always_emit_state) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return 0;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   /* do prepare stage for all atoms */
   for (i = 0; i < Elements(atoms); i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (check_state(state, &atom->dirty)) {
         if (atom->prepare) {
            ret = atom->prepare(brw);
            if (ret)
               return ret;
         }
      }
   }

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_shader *fp = brw->curr.fragment_shader;
      if (fp) {
         assert(fp->info.file_max[TGSI_FILE_SAMPLER] < (int)brw->curr.num_samplers);
	 /*assert(fp->info.texture_max <= brw->curr.num_textures);*/
      }
   }

   return 0;
}
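check_state() is the dirty-bit test that gates each atom's prepare hook in the loop above. A sketch, assuming the three-field struct brw_state_flags layout visible in brw_validate_state():

static GLboolean check_state(const struct brw_state_flags *a,
                             const struct brw_state_flags *b)
{
   /* An atom runs when any dirty bit it listens for is currently set. */
   return ((a->mesa & b->mesa) |
           (a->brw & b->brw) |
           (a->cache & b->cache)) != 0;
}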
Example #4
static int prepare_psp_urb_cbs(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.state_bo);
   brw_add_validated_bo(brw, brw->gs.state_bo);
   brw_add_validated_bo(brw, brw->clip.state_bo);
   brw_add_validated_bo(brw, brw->sf.state_bo);
   brw_add_validated_bo(brw, brw->wm.state_bo);
   brw_add_validated_bo(brw, brw->cc.state_bo);
   return 0;
}
Example #5
static void brw_prepare_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {

      /* Get new bufferobj, offset:
       */
      intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
			&bo, &offset);
      brw->ib.start_vertex_offset = offset / ib_type_size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
       if ((get_size(index_buffer->type) - 1) & offset) {
           GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                                GL_ELEMENT_ARRAY_BUFFER_ARB,
                                                GL_DYNAMIC_DRAW_ARB,
                                                bufferobj);
           map += offset;

	   intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
			     &bo, &offset);
	   brw->ib.start_vertex_offset = offset / ib_type_size;

           ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
       } else {
	  /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
	   * the index buffer state when we're just moving the start index
	   * of our drawing.
	   */
	  brw->ib.start_vertex_offset = offset / ib_type_size;

	  bo = intel_bufferobj_source(intel,
				      intel_buffer_object(bufferobj),
				      ib_type_size,
				      &offset);
	  drm_intel_bo_reference(bo);

	  brw->ib.start_vertex_offset += offset / ib_type_size;
       }
   }

   if (brw->ib.bo != bo) {
      drm_intel_bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;

      brw_add_validated_bo(brw, brw->ib.bo);
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      drm_intel_bo_unreference(bo);
   }

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   }
}
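get_size() maps a GL type enum to its width in bytes; the alignment test and the start_vertex_offset arithmetic above both depend on it, and Example #6 reuses the same helper for vertex-array types, treating a zero return as "unsupported". A plausible sketch along those lines:

static GLuint get_size(GLenum type)
{
   /* Width in bytes of one element of the given GL type; 0 marks an
    * unsupported type (see the enable check in Example #6).
    */
   switch (type) {
   case GL_DOUBLE:         return sizeof(GLdouble);
   case GL_FLOAT:          return sizeof(GLfloat);
   case GL_INT:            return sizeof(GLint);
   case GL_UNSIGNED_INT:   return sizeof(GLuint);
   case GL_SHORT:          return sizeof(GLshort);
   case GL_UNSIGNED_SHORT: return sizeof(GLushort);
   case GL_BYTE:           return sizeof(GLbyte);
   case GL_UNSIGNED_BYTE:  return sizeof(GLubyte);
   default:                return 0;
   }
}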
Example #6
static void brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   /* CACHE_NEW_VS_PROG */
   GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0, total_size = 0;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;
   int delta, i, j;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint i = ffs(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      vs_inputs &= ~(1 << i);
      if (input->glarray->Size && get_size(input->glarray->Type))
         brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      goto validate;

   /* XXX: In the rare cases where this happens we fallback all
    * the way to software rasterization, although a tnl fallback
    * would be sufficient.  I don't know of *any* real world
    * cases with > 17 vertex attributes enabled, so it probably
    * isn't an issue at this point.
    */
   if (brw->vb.nr_enabled >= BRW_VEP_MAX) {
      intel->Fallback = GL_TRUE; /* boolean, not bitfield */
      return;
   }

   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_client_array *glarray = input->glarray;
      int type_size = get_size(glarray->Type);

      input->element_size = type_size * glarray->Size;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
	 struct intel_buffer_object *intel_buffer =
	    intel_buffer_object(glarray->BufferObj);
	 int k;

	 for (k = 0; k < i; k++) {
	    const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
	    if (glarray->BufferObj == other->BufferObj &&
		glarray->StrideB == other->StrideB &&
		(uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
	    {
	       input->buffer = brw->vb.enabled[k]->buffer;
	       input->offset = glarray->Ptr - other->Ptr;
	       break;
	    }
	 }
	 if (k == i) {
	    struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

	    /* Named buffer object: Just reference its contents directly. */
            buffer->bo = intel_bufferobj_source(intel,
                                                intel_buffer, type_size,
						&buffer->offset);
	    drm_intel_bo_reference(buffer->bo);
	    buffer->offset += (uintptr_t)glarray->Ptr;
	    buffer->stride = glarray->StrideB;

	    input->buffer = j++;
	    input->offset = 0;
	 }

	 /* This is a common place to reach if the user mistakenly supplies
	  * a pointer in place of a VBO offset.  If we just let it go through,
	  * we may end up dereferencing a pointer beyond the bounds of the
	  * GTT.  We would hope that the VBO's max_index would save us, but
	  * Mesa appears to hand us min/max values not clipped to the
	  * array object's _MaxElement, and _MaxElement frequently appears
	  * to be wrong anyway.
	  *
	  * The VBO spec allows application termination in this case, and it's
	  * probably a service to the poor programmer to do so rather than
	  * trying to just not render.
	  */
	 assert(input->offset < brw->vb.buffers[input->buffer].bo->size);
      } else {
	 /* Queue the buffer object up to be uploaded in the next pass,
	  * when we've decided if we're doing interleaved or not.
	  */
	 if (nr_uploads == 0) {
	    /* Position array not properly enabled:
	     */
	    if (input->attrib == VERT_ATTRIB_POS && glarray->StrideB == 0) {
               intel->Fallback = GL_TRUE; /* boolean, not bitfield */
               return;
            }

	    interleaved = glarray->StrideB;
	    ptr = glarray->Ptr;
	 }
	 else if (interleaved != glarray->StrideB ||
		  (uintptr_t)(glarray->Ptr - ptr) > interleaved)
	 {
	    interleaved = 0;
	 }
	 else if ((uintptr_t)(glarray->Ptr - ptr) & (type_size - 1))
	 {
	    /* enforce natural alignment (for doubles) */
	    interleaved = 0;
	 }

	 upload[nr_uploads++] = input;
	 total_size = ALIGN(total_size, type_size);
	 total_size += input->element_size;
      }
   }

   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }
   if (delta && !brw->intel.intelScreen->relaxed_relocations)
      min_index = delta = 0;

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved && interleaved <= 2*total_size) {
	 struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
	 /* All uploads are interleaved, so upload the arrays together as
	  * interleaved.  First, upload the contents and set up upload[0].
	  */
	 copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
				 buffer, interleaved);
	 buffer->offset -= delta * interleaved;

	 for (i = 0; i < nr_uploads; i++) {
	    /* Then, just point upload[i] at upload[0]'s buffer. */
	    upload[i]->offset =
	       ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
	    upload[i]->buffer = j;
	 }
	 j++;

	 nr_uploads = 0;
      }
      else if (total_size < 2048) {
	 /* Upload non-interleaved arrays into a single interleaved array */
	 struct brw_vertex_buffer *buffer;
	 int count = max_index - min_index + 1;
	 int offset;
	 char *map;

	 map = intel_upload_map(&brw->intel, total_size * count, total_size);
	 for (i = offset = 0; i < nr_uploads; i++) {
	    const unsigned char *src = upload[i]->glarray->Ptr;
	    int size = upload[i]->element_size;
	    int stride = upload[i]->glarray->StrideB;
	    char *dst;
	    int n;

	    offset = ALIGN(offset, get_size(upload[i]->glarray->Type));
	    dst = map + offset;
	    src += min_index * stride;

	    for (n = 0; n < count; n++) {
	       memcpy(dst, src, size);
	       src += stride;
	       dst += total_size;
	    }

	    upload[i]->offset = offset;
	    upload[i]->buffer = j;

	    offset += size;
	 }
	 assert(offset == total_size);
	 buffer = &brw->vb.buffers[j++];
	 intel_upload_unmap(&brw->intel, map, offset * count, offset,
			    &buffer->bo, &buffer->offset);
	 buffer->stride = offset;
	 buffer->offset -= delta * offset;

	 nr_uploads = 0;
      }
   }
   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
			      buffer, upload[i]->element_size);
      buffer->offset -= delta * buffer->stride;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }

   /* can we simply extend the current vb? */
   if (j == brw->vb.nr_current_buffers) {
      int delta = 0;
      for (i = 0; i < j; i++) {
	 int d;

	 if (brw->vb.current_buffers[i].handle != brw->vb.buffers[i].bo->handle ||
	     brw->vb.current_buffers[i].stride != brw->vb.buffers[i].stride)
	    break;

	 d = brw->vb.buffers[i].offset - brw->vb.current_buffers[i].offset;
	 if (d < 0)
	    break;
	 if (i == 0)
	    delta = d / brw->vb.current_buffers[i].stride;
	 if (delta * brw->vb.current_buffers[i].stride != d)
	    break;
      }

      if (i == j) {
	 brw->vb.start_vertex_bias += delta;
	 while (--j >= 0)
	    drm_intel_bo_unreference(brw->vb.buffers[j].bo);
	 j = 0;
      }
   }

   brw->vb.nr_buffers = j;

validate:
   brw_prepare_query_begin(brw);
   for (i = 0; i < brw->vb.nr_buffers; i++) {
      brw_add_validated_bo(brw, brw->vb.buffers[i].bo);
   }
}
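ALIGN(), used when packing the single interleaved upload above (and again for the CURBE offsets in Example #8), is the usual round-up idiom; a sketch, valid only when alignment is a power of two:

/* Round value up to the next multiple of alignment (a power of two). */
#define ALIGN(value, alignment)  (((value) + (alignment) - 1) & ~((alignment) - 1))

For instance, ALIGN(13, 8) yields 16, and ALIGN(curbe_next_offset, 64) in Example #8 keeps each CURBE block 64-byte aligned.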
Example #7
static int prepare_binding_table_pointers(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.bind_bo);
   brw_add_validated_bo(brw, brw->wm.bind_bo);
   return 0;
}
Example #8
/* Upload a new set of constants.  Too much variability to go into the
 * cache mechanism, but maybe would benefit from a comparison against
 * the current uploaded set of constants.
 */
static void prepare_constant_buffer(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   const struct brw_vertex_program *vp =
      brw_vertex_program_const(brw->vertex_program);
   const GLuint sz = brw->curbe.total_size;
   const GLuint bufsz = sz * 16 * sizeof(GLfloat);
   GLfloat *buf;
   GLuint i;

   if (sz == 0) {
      brw->curbe.last_bufsz  = 0;
      return;
   }

   buf = brw->curbe.next_buf;

   /* fragment shader constants */
   if (brw->curbe.wm_size) {
      GLuint offset = brw->curbe.wm_start * 16;

      /* copy float constants */
      for (i = 0; i < brw->wm.prog_data->nr_params; i++) {
	 buf[offset + i] = convert_param(brw->wm.prog_data->param_convert[i],
					 *brw->wm.prog_data->param[i]);
      }
   }


   /* The clipplanes are actually delivered to both CLIP and VS units.
    * VS uses them to calculate the outcode bitmasks.
    */
   if (brw->curbe.clip_size) {
      GLuint offset = brw->curbe.clip_start * 16;
      GLuint j;

      /* If any planes are going this way, send them all this way:
       */
      for (i = 0; i < 6; i++) {
	 buf[offset + i * 4 + 0] = fixed_plane[i][0];
	 buf[offset + i * 4 + 1] = fixed_plane[i][1];
	 buf[offset + i * 4 + 2] = fixed_plane[i][2];
	 buf[offset + i * 4 + 3] = fixed_plane[i][3];
      }

      /* Clip planes: _NEW_TRANSFORM plus _NEW_PROJECTION to get to
       * clip-space:
       */
      assert(MAX_CLIP_PLANES == 6);
      for (j = 0; j < MAX_CLIP_PLANES; j++) {
	 if (ctx->Transform.ClipPlanesEnabled & (1<<j)) {
	    buf[offset + i * 4 + 0] = ctx->Transform._ClipUserPlane[j][0];
	    buf[offset + i * 4 + 1] = ctx->Transform._ClipUserPlane[j][1];
	    buf[offset + i * 4 + 2] = ctx->Transform._ClipUserPlane[j][2];
	    buf[offset + i * 4 + 3] = ctx->Transform._ClipUserPlane[j][3];
	    i++;
	 }
      }
   }

   /* vertex shader constants */
   if (brw->curbe.vs_size) {
      GLuint offset = brw->curbe.vs_start * 16;
      GLuint nr = brw->vs.prog_data->nr_params / 4;

      /* Load the subset of push constants that will get used when
       * we also have a pull constant buffer.
       */
      for (i = 0; i < vp->program.Base.Parameters->NumParameters; i++) {
	 if (brw->vs.constant_map[i] != -1) {
	    assert(brw->vs.constant_map[i] <= nr);
	    memcpy(buf + offset + brw->vs.constant_map[i] * 4,
		   vp->program.Base.Parameters->ParameterValues[i],
		   4 * sizeof(float));
	 }
      }
   }

   if (0) {
      for (i = 0; i < sz*16; i+=4) 
	 printf("curbe %d.%d: %f %f %f %f\n", i/8, i&4,
		buf[i+0], buf[i+1], buf[i+2], buf[i+3]);

      printf("last_buf %p buf %p sz %d/%d cmp %d\n",
	     brw->curbe.last_buf, buf,
	     bufsz, brw->curbe.last_bufsz,
	     brw->curbe.last_buf ? memcmp(buf, brw->curbe.last_buf, bufsz) : -1);
   }

   if (brw->curbe.curbe_bo != NULL &&
       bufsz == brw->curbe.last_bufsz &&
       memcmp(buf, brw->curbe.last_buf, bufsz) == 0) {
      /* constants have not changed */
   } else {
      /* Update the record of what our last set of constants was.  We
       * don't just flip the pointers because we don't fill in the
       * data in the padding between the entries.
       */
      memcpy(brw->curbe.last_buf, buf, bufsz);
      brw->curbe.last_bufsz = bufsz;

      if (brw->curbe.curbe_bo != NULL &&
	  brw->curbe.curbe_next_offset + bufsz > brw->curbe.curbe_bo->size)
      {
	 drm_intel_gem_bo_unmap_gtt(brw->curbe.curbe_bo);
	 drm_intel_bo_unreference(brw->curbe.curbe_bo);
	 brw->curbe.curbe_bo = NULL;
      }

      if (brw->curbe.curbe_bo == NULL) {
	 /* Allocate a single page for CURBE entries for this batchbuffer.
	  * They're generally around 64b.
	  */
	 brw->curbe.curbe_bo = drm_intel_bo_alloc(brw->intel.bufmgr, "CURBE",
						  4096, 1 << 6);
	 brw->curbe.curbe_next_offset = 0;
	 drm_intel_gem_bo_map_gtt(brw->curbe.curbe_bo);
	 assert(bufsz < 4096);
      }

      brw->curbe.curbe_offset = brw->curbe.curbe_next_offset;
      brw->curbe.curbe_next_offset += bufsz;
      brw->curbe.curbe_next_offset = ALIGN(brw->curbe.curbe_next_offset, 64);

      /* Copy data to the buffer:
       */
      memcpy(brw->curbe.curbe_bo->virtual + brw->curbe.curbe_offset,
	     buf,
	     bufsz);
   }

   brw_add_validated_bo(brw, brw->curbe.curbe_bo);

   /* Because this provokes an action (ie copy the constants into the
    * URB), it shouldn't be shortcircuited if identical to the
    * previous time - because eg. the urb destination may have
    * changed, or the urb contents different to last time.
    *
    * Note that the data referred to is actually copied internally,
    * not just used in place according to passed pointer.
    *
    * It appears that the CS unit takes care of using each available
    * URB entry (Const URB Entry == CURBE) in turn, and issuing
    * flushes as necessary when doublebuffering of CURBEs isn't
    * possible.
    */
}
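Finally, brw_clear_validated_bos(), called at the top of brw_validate_state() in Example #3, is the counterpart that drops the references the prepare passes took. A sketch, under the same assumed brw->state layout as the brw_add_validated_bo() sketch after Example #2:

static void brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Release every bo registered since the last batch. */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      drm_intel_bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}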