Code Example #1
dri_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj, GLuint flag)
{
   if (intel_obj->region) {
      if (flag == INTEL_WRITE_PART)
         intel_bufferobj_cow(intel, intel_obj);
      else if (flag == INTEL_WRITE_FULL) {
         intel_bufferobj_release_region(intel, intel_obj);
	 intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   if (intel_obj->buffer == NULL) {
      void *sys_buffer = intel_obj->sys_buffer;

      /* only one of buffer and sys_buffer could be non-NULL */
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      intel_obj->sys_buffer = NULL;

      intel_bufferobj_subdata(&intel->ctx,
			      GL_ARRAY_BUFFER_ARB,
			      0,
			      intel_obj->Base.Size,
			      sys_buffer,
			      &intel_obj->Base);
      _mesa_free(sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   return intel_obj->buffer;
}
Code Example #2
File: intel_buffer_objects.c  Project: nikai3d/mesa
drm_intel_bo *
intel_bufferobj_buffer(struct intel_context *intel,
                       struct intel_buffer_object *intel_obj,
		       GLuint flag)
{
   if (intel_obj->region) {
      if (flag == INTEL_WRITE_PART)
         intel_bufferobj_cow(intel, intel_obj);
      else if (flag == INTEL_WRITE_FULL) {
         intel_bufferobj_release_region(intel, intel_obj);
	 intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   if (intel_obj->source)
      release_buffer(intel_obj);

   if (intel_obj->buffer == NULL) {
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer,
			   0, intel_obj->Base.Size,
			   intel_obj->sys_buffer);

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
      intel_obj->offset = 0;
   }

   return intel_obj->buffer;
}
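
The flag argument encodes the caller's write intent: INTEL_WRITE_PART forces a copy-on-write of any shared region, while INTEL_WRITE_FULL drops the region and allocates a fresh BO, since the old contents will be overwritten anyway. A minimal sketch of a hypothetical driver-internal call site (the call site itself is not taken from the Mesa sources above):

/* Hypothetical call site: resolve the GL buffer object to its backing GEM BO
 * before overwriting the whole thing, so a shared region can be dropped
 * rather than copied. */
drm_intel_bo *dst_bo = intel_bufferobj_buffer(intel, intel_obj, INTEL_WRITE_FULL);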
Code Example #3
File: intel_buffer_objects.c  Project: nikai3d/mesa
/**
 * Called via glMapBufferARB().
 */
static void *
intel_bufferobj_map(struct gl_context * ctx,
                    GLenum target,
                    GLenum access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);
   GLboolean write_only = (access == GL_WRITE_ONLY_ARB);

   assert(intel_obj);

   if (intel_obj->sys_buffer) {
      if (!read_only && intel_obj->source) {
	 release_buffer(intel_obj);
      }

      if (!intel_obj->buffer || intel_obj->source) {
	 obj->Pointer = intel_obj->sys_buffer;
	 obj->Length = obj->Size;
	 obj->Offset = 0;
	 return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Flush any existing batchbuffer that might reference this data. */
   if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   if (write_only) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, !read_only);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual;
   obj->Length = obj->Size;
   obj->Offset = 0;

   return obj->Pointer;
}
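
For context, here is a minimal application-side sketch (assumed usage, not part of the driver source; the helper name and sizes are illustrative) of the two mapping paths above: a GL_WRITE_ONLY access maps the BO through the GTT, any other access goes through drm_intel_bo_map.

#define GL_GLEXT_PROTOTYPES 1
#include <GL/gl.h>
#include <string.h>

/* Assumed usage sketch: fill a buffer object through glMapBuffer. */
static void
fill_vbo(GLuint vbo, const void *data, GLsizeiptr size)
{
   glBindBuffer(GL_ARRAY_BUFFER, vbo);
   glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW);

   /* GL_WRITE_ONLY takes the drm_intel_gem_bo_map_gtt() path above. */
   void *ptr = glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
   if (ptr) {
      memcpy(ptr, data, size);
      glUnmapBuffer(GL_ARRAY_BUFFER);
   }
}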
Code Example #4
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(GLcontext * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   if (size == 0)
      return;

   assert(intel_obj);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->sys_buffer)
      memcpy((char *)intel_obj->sys_buffer + offset, data, size);
   else {
      /* Flush any existing batchbuffer that might reference this data. */
      if (drm_intel_bo_busy(intel_obj->buffer) ||
	  drm_intel_bo_references(intel->batch->buf, intel_obj->buffer)) {
	 drm_intel_bo *temp_bo;

	 temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

	 drm_intel_bo_subdata(temp_bo, 0, size, data);

	 intel_emit_linear_blit(intel,
				intel_obj->buffer, offset,
				temp_bo, 0,
				size);

	 drm_intel_bo_unreference(temp_bo);
      } else {
	 drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
      }
   }
}
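
A minimal application-side sketch (assumed usage; vbo and verts are placeholders for an existing buffer name and the replacement data) of the case this code optimizes: updating a sub-range of a buffer the GPU may still be reading. Instead of stalling in drm_intel_bo_subdata, the driver stages the data in a temporary BO and schedules a linear blit into place.

/* Assumed usage sketch: patch data in the middle of a VBO that the previous
 * draw call may still be reading.  The driver above uploads "verts" into a
 * temp BO and blits it into the real buffer instead of blocking. */
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferSubData(GL_ARRAY_BUFFER, 256 /* arbitrary offset */, sizeof(verts), verts);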
Code Example #5
/**
 * Called via glMapBufferRange().
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(GLcontext * ctx,
			  GLenum target, GLintptr offset, GLsizeiptr length,
			  GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      obj->Pointer = intel_obj->sys_buffer + offset;
      return obj->Pointer;
   }

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If the mapping is synchronized with other GL operations, flush
    * the batchbuffer so that GEM knows about the buffer access for later
    * syncing.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
      intelFlush(ctx);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = dri_bo_alloc(intel->bufmgr, "bufferobj",
				       intel_obj->Base.Size, 64);
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
	 intel_obj->range_map_buffer = _mesa_malloc(length);
	 obj->Pointer = intel_obj->range_map_buffer;
      } else {
	 intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
						      "range map",
						      length, 64);
	 if (!(access & GL_MAP_READ_BIT) &&
	     intel->intelScreen->kernel_exec_fencing) {
	    drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
	    intel_obj->mapped_gtt = GL_TRUE;
	 } else {
	    drm_intel_bo_map(intel_obj->range_map_bo,
			     (access & GL_MAP_WRITE_BIT) != 0);
	    intel_obj->mapped_gtt = GL_FALSE;
	 }
	 obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (!(access & GL_MAP_READ_BIT) &&
       intel->intelScreen->kernel_exec_fencing) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}
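
A minimal application-side sketch (assumed usage; the helper name and parameters are illustrative) of the streaming pattern described in the comment: the application writes only the range it needs, marks it invalidated, and so never blocks on the GPU reading earlier parts of the buffer.

#define GL_GLEXT_PROTOTYPES 1
#include <GL/gl.h>
#include <string.h>

/* Assumed usage sketch: write "len" bytes at "offset" without stalling.
 * GL_MAP_INVALIDATE_RANGE_BIT lets the driver hand back a fresh range_map_bo
 * (or a malloc'd block when GL_MAP_FLUSH_EXPLICIT_BIT is also set) and blit
 * it into the real BO at unmap time, as implemented above. */
static void
stream_range(GLuint vbo, GLintptr offset, const void *src, GLsizeiptr len)
{
   glBindBuffer(GL_ARRAY_BUFFER, vbo);
   void *dst = glMapBufferRange(GL_ARRAY_BUFFER, offset, len,
                                GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
   if (dst) {
      memcpy(dst, src, len);
      glUnmapBuffer(GL_ARRAY_BUFFER);
   }
}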
Code Example #6
File: intel_buffer_objects.c  Project: nikai3d/mesa
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
	 release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
	 memcpy((char *)intel_obj->sys_buffer + offset, data, size);
	 return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   /* replace the current busy bo with fresh data */
   if (busy && size == intel_obj->Base.Size) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   } else if (intel->gen < 6) {
      if (busy) {
	 drm_intel_bo *temp_bo;

	 temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

	 drm_intel_bo_subdata(temp_bo, 0, size, data);

	 intel_emit_linear_blit(intel,
				intel_obj->buffer, offset,
				temp_bo, 0,
				size);

	 drm_intel_bo_unreference(temp_bo);
      } else {
	 drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
      }
   } else {
      /* Can't use the blit to modify the buffer in the middle of batch. */
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
	 intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}
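
The first branch above is worth noting separately: when the whole buffer is rewritten while the old BO is still busy, the driver swaps in a freshly allocated BO rather than waiting for the GPU. A minimal application-side sketch (assumed usage; vbo, whole_size and new_contents are placeholders) that hits this path:

/* Assumed usage sketch: rewriting the entire buffer each frame.  Because the
 * size equals intel_obj->Base.Size and the old BO is still referenced by the
 * previous batch, the driver drops the old BO and uploads into a new one. */
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferSubData(GL_ARRAY_BUFFER, 0, whole_size, new_contents);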