int brw_prepare_indices( struct brw_context *brw,
			 const struct _mesa_index_buffer *index_buffer,
			 dri_bo **bo_return,
			 GLuint *offset_return)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   GLuint ib_size = get_size(index_buffer->type) * index_buffer->count;
   dri_bo *bo;
   struct gl_buffer_object *bufferobj = index_buffer->obj;
   GLuint offset = (GLuint) (unsigned long) index_buffer->ptr;
   int ret;

   /* Turn into a proper VBO:
    */
   if (!bufferobj->Name) {
      /* Get new bufferobj, offset:
       */
      get_space(brw, ib_size, &bo, &offset);

      /* Straight upload
       */
      dri_bo_subdata(bo, offset, ib_size, index_buffer->ptr);
   } else {
      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;

         get_space(brw, ib_size, &bo, &offset);

         dri_bo_subdata(bo, offset, ib_size, map);

         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
      } else {
         bo = intel_bufferobj_buffer(intel, intel_buffer_object(bufferobj),
                                     INTEL_READ);
         dri_bo_reference(bo);
      }
   }

   *bo_return = bo;
   *offset_return = offset;
   ret = dri_bufmgr_check_aperture_space(bo);
   return ret;
}
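The get_space() helper used by both functions here is not shown on this page. A minimal sketch of such a suballocator, under assumed names and layout (the real helper lives in Mesa's brw_draw_upload.c and may differ):

#include <intel_bufmgr.h>

/* Hypothetical upload-buffer state; not from the original source. */
struct upload {
   dri_bufmgr *bufmgr;
   dri_bo *bo;           /* current upload buffer, may be NULL */
   unsigned int offset;  /* next free byte within bo */
};

static void
get_space_sketch(struct upload *u, unsigned int size,
                 dri_bo **bo_return, unsigned int *offset_return)
{
   /* Start a fresh upload bo when the current one is absent or full. */
   if (u->bo == NULL || u->offset + size > u->bo->size) {
      dri_bo_unreference(u->bo);   /* unreference tolerates NULL */
      u->bo = dri_bo_alloc(u->bufmgr, "upload",
                           size > 65536 ? size : 65536, 64);
      u->offset = 0;
   }

   /* Hand the caller its own reference plus the suballocated offset. */
   dri_bo_reference(u->bo);
   *bo_return = u->bo;
   *offset_return = u->offset;
   u->offset += size;
}

The caller receives its own reference, which is why brw_prepare_indices() can pass the bo straight back through *bo_return without referencing it again.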
static void
copy_array_to_vbo_array( struct brw_context *brw,
			 struct brw_vertex_element *element,
			 GLuint dst_stride)
{
   GLuint size = element->count * dst_stride;

   get_space(brw, size, &element->bo, &element->offset);

   if (element->glarray->StrideB == 0) {
      assert(element->count == 1);
      element->stride = 0;
   } else {
      element->stride = dst_stride;
   }

   if (dst_stride == element->glarray->StrideB) {
      dri_bo_subdata(element->bo,
		     element->offset,
		     size,
		     element->glarray->Ptr);
   } else {
      void *data;
      char *dest;
      const char *src = element->glarray->Ptr;
      int i;

      data = _mesa_malloc(dst_stride * element->count);
      dest = data;
      for (i = 0; i < element->count; i++) {
	 memcpy(dest, src, dst_stride);
	 src += element->glarray->StrideB;
	 dest += dst_stride;
      }

      dri_bo_subdata(element->bo,
		     element->offset,
		     size,
		     data);
      _mesa_free(data);
   }
}
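For clarity, the repack-and-upload path above can be isolated into a standalone helper. A sketch with illustrative names and simplified error handling:

#include <stdlib.h>
#include <string.h>
#include <intel_bufmgr.h>

/* Tightly pack 'count' elements of 'element_size' bytes from a source
 * array with byte stride 'src_stride', then upload the packed copy at
 * bo + offset.  Sketch only; the original interleaves this with the
 * brw_vertex_element bookkeeping. */
static int
upload_packed(dri_bo *bo, unsigned long offset, const char *src,
              unsigned int src_stride, unsigned int element_size,
              unsigned int count)
{
   char *data = malloc((size_t)element_size * count);
   char *dest = data;
   unsigned int i;
   int ret;

   if (data == NULL)
      return -1;

   for (i = 0; i < count; i++) {
      memcpy(dest, src, element_size);
      src += src_stride;
      dest += element_size;
   }

   ret = dri_bo_subdata(bo, offset, (unsigned long)element_size * count, data);
   free(data);
   return ret;
}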
Example 3
static void intel_end_vertex(intel_screen_private *intel)
{
	if (intel->vertex_bo) {
		if (intel->vertex_used) {
			dri_bo_subdata(intel->vertex_bo, 0, intel->vertex_used*4, intel->vertex_ptr);
			intel->vertex_used = 0;
		}

		dri_bo_unreference(intel->vertex_bo);
		intel->vertex_bo = NULL;
	}

	intel->vertex_id = 0;
}
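A hedged sketch of the producer side that intel_end_vertex() flushes: vertices accumulate in a CPU staging buffer and reach the bo only via the single dri_bo_subdata() call at flush time. The struct and emit helper below are hypothetical:

#include <stdint.h>
#include <intel_bufmgr.h>

struct vertex_stream {
   dri_bo *vertex_bo;    /* GPU destination */
   uint32_t *vertex_ptr; /* CPU staging buffer */
   int vertex_used;      /* dwords written so far */
};

/* Append one 2D float vertex to the staging buffer; no GPU traffic
 * happens until the stream is flushed with dri_bo_subdata(). */
static void
emit_vertex_2f(struct vertex_stream *s, float x, float y)
{
   union { float f; uint32_t dw; } u;

   u.f = x;
   s->vertex_ptr[s->vertex_used++] = u.dw;
   u.f = y;
   s->vertex_ptr[s->vertex_used++] = u.dw;
}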
Example 4
static void
drmmode_load_cursor_argb (xf86CrtcPtr crtc, CARD32 *image)
{
	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
	int ret;

	/* cursor should be mapped already */
	ret = dri_bo_subdata(drmmode_crtc->cursor, 0, 64*64*4, image);
	if (ret)
		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
			   "failed to set cursor: %s", strerror(-ret));

}
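The comment notes the cursor bo must already exist. A plausible allocation for it (hypothetical helper; the real driver allocates the cursor elsewhere) sizes it for 64x64 ARGB, matching the 64*64*4 upload above:

#include <intel_bufmgr.h>

/* Allocate a 64x64 ARGB (4 bytes/pixel) cursor buffer.  Illustrative
 * sketch; the size follows the 64*64*4 upload in the code above. */
static drm_intel_bo *
alloc_cursor_bo(drm_intel_bufmgr *bufmgr)
{
   return drm_intel_bo_alloc(bufmgr, "cursor", 64 * 64 * 4, 4096);
}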
Example 5
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
intel_bufferobj_data(GLcontext * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   if (intel_obj->buffer != NULL) {
      dri_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = NULL;
   }
   _mesa_free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
#ifdef I915
      /* On pre-965, stick VBOs in system memory, as we're always doing swtnl
       * with their contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
	 intel_obj->sys_buffer = _mesa_malloc(size);
	 if (intel_obj->sys_buffer != NULL) {
	    if (data != NULL)
	       memcpy(intel_obj->sys_buffer, data, size);
	    return GL_TRUE;
	 }
      }
#endif
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return GL_FALSE;

      if (data != NULL)
	 dri_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return GL_TRUE;
}
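Stripped of Mesa's buffer-object bookkeeping, the bo path of intel_bufferobj_data() reduces to allocate-then-upload. A minimal standalone sketch with illustrative names:

#include <intel_bufmgr.h>

/* Allocate a fresh bo of 'size' bytes and, if 'data' is non-NULL,
 * upload it in a single drm_intel_bo_subdata() call; with NULL data
 * the storage is allocated but left uninitialized, matching the
 * BufferData semantics described above. */
static drm_intel_bo *
buffer_data_sketch(drm_intel_bufmgr *bufmgr, unsigned long size,
                   const void *data)
{
   drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "bufferobj", size, 64);

   if (bo != NULL && data != NULL)
      drm_intel_bo_subdata(bo, 0, size, data);
   return bo;
}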
Example 6
/* Creates a new WM constant buffer reflecting the current fragment program's
 * constants, if needed by the fragment program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static drm_intel_bo *
brw_wm_update_constant_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   const struct gl_program_parameter_list *params = fp->program.Base.Parameters;
   const int size = params->NumParameters * 4 * sizeof(GLfloat);
   drm_intel_bo *const_buffer;

   /* BRW_NEW_FRAGMENT_PROGRAM */
   if (!fp->use_const_buffer)
      return NULL;

   const_buffer = drm_intel_bo_alloc(intel->bufmgr, "fp_const_buffer",
				     size, 64);

   /* _NEW_PROGRAM_CONSTANTS */
   dri_bo_subdata(const_buffer, 0, size, params->ParameterValues);

   return const_buffer;
}
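The same upload decoupled from the brw context, as a sketch; the "fp_const_buffer" name and 64-byte alignment come from the code above, everything else is illustrative:

#include <intel_bufmgr.h>

/* Upload 'nr' vec4 constants (4 floats each) into a new, 64-byte
 * aligned bo.  The caller owns the returned reference and should
 * drm_intel_bo_unreference() it once the batch no longer needs it. */
static drm_intel_bo *
upload_constants(drm_intel_bufmgr *bufmgr, const float (*values)[4], int nr)
{
   const unsigned long size = nr * 4 * sizeof(float);
   drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "fp_const_buffer",
                                         size, 64);

   if (bo != NULL)
      drm_intel_bo_subdata(bo, 0, size, values);
   return bo;
}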
Example 7
/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(GLcontext * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->sys_buffer)
      memcpy((char *)intel_obj->sys_buffer + offset, data, size);
   else {
      /* Flush any existing batchbuffer that might reference this data. */
      if (drm_intel_bo_busy(intel_obj->buffer) ||
	  drm_intel_bo_references(intel->batch->buf, intel_obj->buffer)) {
	 drm_intel_bo *temp_bo;

	 temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

	 drm_intel_bo_subdata(temp_bo, 0, size, data);

	 intel_emit_linear_blit(intel,
				intel_obj->buffer, offset,
				temp_bo, 0,
				size);

	 drm_intel_bo_unreference(temp_bo);
      } else {
	 dri_bo_subdata(intel_obj->buffer, offset, size, data);
      }
   }
}
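For contrast, a synchronous sketch of the same update through a CPU mapping: drm_intel_bo_map() blocks until the GPU is done with the buffer, which is the stall the busy/references check above avoids by staging through a temporary bo and blitting:

#include <string.h>
#include <intel_bufmgr.h>

/* Write 'size' bytes at bo + offset through a CPU mapping.  Simple but
 * blocking: the map waits for any pending GPU access to the bo. */
static int
subdata_via_map(drm_intel_bo *bo, unsigned long offset,
                unsigned long size, const void *data)
{
   int ret = drm_intel_bo_map(bo, 1 /* write_enable */);

   if (ret != 0)
      return ret;
   memcpy((char *)bo->virtual + offset, data, size);
   drm_intel_bo_unmap(bo);
   return 0;
}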
Example 8
void intel_batch_submit(ScrnInfoPtr scrn)
{
	intel_screen_private *intel = intel_get_screen_private(scrn);
	int ret;

	assert (!intel->in_batch_atomic);

	if (intel->vertex_flush)
		intel->vertex_flush(intel);
	intel_end_vertex(intel);

	if (intel->batch_flush)
		intel->batch_flush(intel);

	if (intel->batch_used == 0)
		return;

	/* Mark the end of the batchbuffer. */
	OUT_BATCH(MI_BATCH_BUFFER_END);
	/* Emit a padding dword if we aren't going to be quad-word aligned. */
	if (intel->batch_used & 1)
		OUT_BATCH(MI_NOOP);

	if (DUMP_BATCHBUFFERS) {
	    FILE *file = fopen(DUMP_BATCHBUFFERS, "a");
	    if (file) {
		fwrite (intel->batch_ptr, intel->batch_used*4, 1, file);
		fclose(file);
	    }
	}

	ret = dri_bo_subdata(intel->batch_bo, 0, intel->batch_used*4, intel->batch_ptr);
	if (ret == 0) {
		ret = drm_intel_bo_mrb_exec(intel->batch_bo,
				intel->batch_used*4,
				NULL, 0, 0xffffffff,
				(HAS_BLT(intel) ?
				 intel->current_batch:
				 I915_EXEC_DEFAULT));
	}

	if (ret != 0) {
		static int once;
		if (!once) {
			if (ret == -EIO) {
				/* The GPU has hung and is unlikely to recover by this point. */
				xf86DrvMsg(scrn->scrnIndex, X_ERROR, "Detected a hung GPU, disabling acceleration.\n");
				xf86DrvMsg(scrn->scrnIndex, X_ERROR, "When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
			} else {
				/* The driver is broken. */
				xf86DrvMsg(scrn->scrnIndex, X_ERROR,
					   "Failed to submit batch buffer, expect rendering corruption: %s.\n ",
					   strerror(-ret));
			}
			uxa_set_force_fallback(xf86ScrnToScreen(scrn), TRUE);
			intel->force_fallback = TRUE;
			once = 1;
		}
	}

	while (!list_is_empty(&intel->batch_pixmaps)) {
		struct intel_pixmap *entry;

		entry = list_first_entry(&intel->batch_pixmaps,
					 struct intel_pixmap,
					 batch);

		entry->busy = -1;
		entry->dirty = 0;
		list_del(&entry->batch);
	}

	if (intel->debug_flush & DEBUG_FLUSH_WAIT)
		drm_intel_bo_wait_rendering(intel->batch_bo);

	intel_next_batch(scrn, intel->current_batch == I915_EXEC_BLT);

	if (intel->batch_commit_notify)
		intel->batch_commit_notify(intel);

	intel->current_batch = 0;
}
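Reduced to its essentials, the submit path terminates the batch with MI_BATCH_BUFFER_END, pads to an even dword count, uploads with dri_bo_subdata(), and execs. A minimal sketch assuming an initialized bufmgr (real drivers reuse the batch bo and carry relocations):

#include <errno.h>
#include <stdint.h>
#include <intel_bufmgr.h>

#define MI_NOOP             0x00000000
#define MI_BATCH_BUFFER_END (0x0a << 23)

/* Submit a do-nothing batch: one MI_BATCH_BUFFER_END plus an MI_NOOP
 * pad dword so the batch length stays quad-word aligned. */
static int
submit_empty_batch(drm_intel_bufmgr *bufmgr)
{
   uint32_t batch[2] = { MI_BATCH_BUFFER_END, MI_NOOP };
   drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "batch",
                                         sizeof(batch), 4096);
   int ret;

   if (bo == NULL)
      return -ENOMEM;

   ret = drm_intel_bo_subdata(bo, 0, sizeof(batch), batch);
   if (ret == 0)
      ret = drm_intel_bo_exec(bo, sizeof(batch), NULL, 0, 0);

   drm_intel_bo_unreference(bo);
   return ret;
}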