static void
intel_bufferobj_copy_subdata(struct gl_context *ctx,
			     struct gl_buffer_object *src,
			     struct gl_buffer_object *dst,
			     GLintptr read_offset, GLintptr write_offset,
			     GLsizeiptr size)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_src = intel_buffer_object(src);
   struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
   drm_intel_bo *src_bo, *dst_bo;
   GLuint src_offset;

   if (size == 0)
      return;

   /* If either buffer is in system memory, just map and memcpy.  We also
    * take this path on gen6+, where a blit would mean synchronizing with
    * the separate blit ring.
    */
   if (intel_src->sys_buffer || intel_dst->sys_buffer || intel->gen >= 6) {
      /* The same buffer may be used as both source and destination, but
       * the copied regions must not overlap.
       */
      if (src == dst) {
         char *ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                               GL_MAP_READ_BIT |
                                               GL_MAP_WRITE_BIT, dst);
         memmove(ptr + write_offset, ptr + read_offset, size);
	 intel_bufferobj_unmap(ctx, dst);
      } else {
	 const char *src_ptr;
	 char *dst_ptr;

         src_ptr = intel_bufferobj_map_range(ctx, 0, src->Size,
                                             GL_MAP_READ_BIT, src);
         dst_ptr = intel_bufferobj_map_range(ctx, 0, dst->Size,
                                             GL_MAP_WRITE_BIT, dst);

	 memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);

	 intel_bufferobj_unmap(ctx, src);
	 intel_bufferobj_unmap(ctx, dst);
      }
      return;
   }

   /* Otherwise, we have real BOs, so blit them. */

   dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
   src_bo = intel_bufferobj_source(intel, intel_src, 64, &src_offset);

   intel_emit_linear_blit(intel,
			  dst_bo, write_offset,
			  src_bo, read_offset + src_offset, size);

   /* Since we've emitted some blits to buffers that will (likely) be used
    * in rendering operations in other cache domains in this batch, emit a
    * flush.  Once again, we wish for a domain tracker in libdrm to cover
    * usage inside of a batchbuffer.
    */
   intel_batchbuffer_emit_mi_flush(intel);
}
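For reference, here is a minimal client-side sketch (not part of the driver source) of the GL call that lands in this hook, assuming a GL 3.1+ context with ARB_copy_buffer; the buffer names are illustrative:

static void copy_subdata_usage_sketch(void)
{
   GLuint bufs[2];

   glGenBuffers(2, bufs);
   glBindBuffer(GL_COPY_READ_BUFFER, bufs[0]);
   glBufferData(GL_COPY_READ_BUFFER, 4096, NULL, GL_STATIC_DRAW);
   glBindBuffer(GL_COPY_WRITE_BUFFER, bufs[1]);
   glBufferData(GL_COPY_WRITE_BUFFER, 4096, NULL, GL_STATIC_DRAW);

   /* Reaches the CopySubData hook with read_offset=0, write_offset=512,
    * size=256.  As the comment above notes, overlapping ranges are not
    * allowed when source and destination are the same buffer.
    */
   glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 512, 256);
}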
Example #2
static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

    if (intel_obj->buffer != NULL)
        return intel_buffer_purgeable(intel_obj->buffer);

    if (option == GL_RELEASED_APPLE) {
        if (intel_obj->sys_buffer != NULL) {
            free(intel_obj->sys_buffer);
            intel_obj->sys_buffer = NULL;
        }

        return GL_RELEASED_APPLE;
    } else {
        /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
        struct intel_context *intel = intel_context(ctx);
        drm_intel_bo *bo = intel_bufferobj_buffer(intel, intel_obj, INTEL_READ);

        return intel_buffer_purgeable(bo);
    }
}
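A hedged sketch of how this hook is reached, assuming the driver advertises GL_APPLE_object_purgeable (buffer_name is illustrative):

static void purgeable_usage_sketch(GLuint buffer_name)
{
   /* GL_RELEASED_APPLE tells the driver it may drop the backing store
    * immediately, which the hook above implements by freeing any
    * system-memory copy; GL_VOLATILE_APPLE would merely mark it purgeable.
    */
   GLenum state = glObjectPurgeableAPPLE(GL_BUFFER_OBJECT_APPLE, buffer_name,
                                         GL_RELEASED_APPLE);
   (void) state;
}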
Example #3
void brw_draw_init( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   GLuint i;
   
   /* Register our drawing function: 
    */
   vbo->draw_prims = brw_draw_prims;

   brw->vb.upload.size = BRW_UPLOAD_INIT_SIZE;

   for (i = 0; i < BRW_NR_UPLOAD_BUFS; i++) {
      brw->vb.upload.vbo[i] = ctx->Driver.NewBufferObject(ctx, 1, GL_ARRAY_BUFFER_ARB);
      
      /* NOTE:  These are set to no-backing-store.
       */
      bmBufferSetInvalidateCB(&brw->intel,
			      intel_bufferobj_buffer(intel_buffer_object(brw->vb.upload.vbo[i])),
			      brw_invalidate_vbo_cb,
			      &brw->intel,
			      GL_TRUE);
   }

   ctx->Driver.BufferData( ctx, 
			   GL_ARRAY_BUFFER_ARB, 
			   BRW_UPLOAD_INIT_SIZE,
			   NULL,
			   GL_DYNAMIC_DRAW_ARB,
			   brw->vb.upload.vbo[0] );
}
Example #4
/**
 * The BufferData() driver hook.
 *
 * Implements glBufferData(), which recreates a buffer object's data store
 * and populates it with the given data, if present.
 *
 * Any data that was previously stored in the buffer object is lost.
 *
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it.
    */
   (void) target;

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   if (size != 0) {
      intel_bufferobj_alloc_buffer(brw, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
	 drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleted according
    * to the spec, but Mesa doesn't call UnmapBuffer for us at context
    * destruction (though it does if you call glDeleteBuffers).
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, 0, obj);

   _mesa_free(intel_obj->sys_buffer);
   if (intel_obj->region) {
      intel_bufferobj_release_region(intel, intel_obj);
   }
   else if (intel_obj->buffer) {
      dri_bo_unreference(intel_obj->buffer);
   }

   _mesa_free(intel_obj);
}
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(GLcontext *ctx, GLenum target,
				   GLintptr offset, GLsizeiptr length,
				   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless the range is mapped through a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
			  intel_obj->buffer, obj->Offset + offset,
			  temp_bo, 0,
			  length);

   drm_intel_bo_unreference(temp_bo);
}
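For context, a sketch of the explicit-flush mapping pattern that exercises this hook, assuming a GL 3.0+ context (vbo and verts are illustrative):

static void explicit_flush_usage_sketch(GLuint vbo, const char *verts)
{
   char *p;

   glBindBuffer(GL_ARRAY_BUFFER, vbo);
   p = glMapBufferRange(GL_ARRAY_BUFFER, 0, 4096,
                        GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);

   memcpy(p, verts, 1024);
   /* Lands in the FlushMappedBufferRange hook with offset=0, length=1024. */
   glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, 1024);

   /* Further writes to the same mapping are legal, which is exactly why
    * the hook cannot unmap and re-map behind the application's back.
    */
   memcpy(p + 1024, verts + 1024, 1024);
   glFlushMappedBufferRange(GL_ARRAY_BUFFER, 1024, 1024);

   glUnmapBuffer(GL_ARRAY_BUFFER);
}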
static GLenum
intel_buffer_object_unpurgeable(GLcontext * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
    return intel_buffer_unpurgeable(ctx, intel_buffer_object(obj)->buffer,
                                    option);
}
Example #8
/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
	 release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
	 memcpy((char *)intel_obj->sys_buffer + offset, data, size);
	 return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
	 /* Replace the current busy bo with fresh data. */
	 drm_intel_bo_unreference(intel_obj->buffer);
	 intel_bufferobj_alloc_buffer(intel, intel_obj);
	 drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else {
         perf_debug("Using a blit copy to avoid stalling on %ldb "
                    "glBufferSubData() to a busy buffer object.\n",
                    (long)size);
	 drm_intel_bo *temp_bo =
	    drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

	 drm_intel_bo_subdata(temp_bo, 0, size, data);

	 intel_emit_linear_blit(intel,
				intel_obj->buffer, offset,
				temp_bo, 0,
				size);

	 drm_intel_bo_unreference(temp_bo);
      }
   } else {
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}
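A sketch of the application pattern the busy-buffer branch is defending against; the names are illustrative and the draw assumes vertex state is already set up:

static void busy_subdata_usage_sketch(GLuint vbo, GLsizeiptr vbo_size,
                                      const void *partial, const void *fresh)
{
   glBindBuffer(GL_ARRAY_BUFFER, vbo);
   glDrawArrays(GL_TRIANGLES, 0, 300);        /* leaves the BO busy */

   /* Partial update of a busy BO: staged through a temp BO plus a blit,
    * so the CPU doesn't stall waiting for the GPU.
    */
   glBufferSubData(GL_ARRAY_BUFFER, 0, 64, partial);

   /* Full-size update of a busy BO: the old BO is simply replaced. */
   glBufferSubData(GL_ARRAY_BUFFER, 0, vbo_size, fresh);
}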
Example #9
static void
gen8_upload_3dstate_so_buffers(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_TRANSFORM_FEEDBACK */
   struct gl_transform_feedback_object *xfb_obj =
      ctx->TransformFeedback.CurrentObject;
   struct brw_transform_feedback_object *brw_obj =
      (struct brw_transform_feedback_object *) xfb_obj;

   /* Set up the (up to) four output buffers.  These are the ranges defined
    * in the gl_transform_feedback_object.
    */
   for (int i = 0; i < 4; i++) {
      struct intel_buffer_object *bufferobj =
         intel_buffer_object(xfb_obj->Buffers[i]);

      if (!bufferobj) {
         BEGIN_BATCH(8);
         OUT_BATCH(_3DSTATE_SO_BUFFER << 16 | (8 - 2));
         OUT_BATCH((i << SO_BUFFER_INDEX_SHIFT));
         OUT_BATCH(0);
         OUT_BATCH(0);
         OUT_BATCH(0);
         OUT_BATCH(0);
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
         continue;
      }

      uint32_t start = xfb_obj->Offset[i];
      assert(start % 4 == 0);
      uint32_t end = ALIGN(start + xfb_obj->Size[i], 4);
      drm_intel_bo *bo =
         intel_bufferobj_buffer(brw, bufferobj, start, end - start);
      assert(end <= bo->size);

      perf_debug("Missing MOCS setup for 3DSTATE_SO_BUFFER.");

      BEGIN_BATCH(8);
      OUT_BATCH(_3DSTATE_SO_BUFFER << 16 | (8 - 2));
      OUT_BATCH(GEN8_SO_BUFFER_ENABLE | (i << SO_BUFFER_INDEX_SHIFT) |
                GEN8_SO_BUFFER_OFFSET_WRITE_ENABLE |
                GEN8_SO_BUFFER_OFFSET_ADDRESS_ENABLE |
                (BDW_MOCS_WB << 22));
      OUT_RELOC64(bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, start);
      OUT_BATCH(xfb_obj->Size[i] / 4 - 1);
      OUT_RELOC64(brw_obj->offset_bo,
                  I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                  i * sizeof(uint32_t));
      if (brw_obj->zero_offsets)
         OUT_BATCH(0); /* Zero out the offset and write that to offset_bo */
      else
         OUT_BATCH(0xFFFFFFFF); /* Use offset_bo as the "Stream Offset." */
      ADVANCE_BATCH();
   }
   brw_obj->zero_offsets = false;
}
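For context, a sketch of the client-side binding that populates the xfb_obj->Buffers[i], Offset[i], and Size[i] fields consumed above (xfb_buf is illustrative):

static void xfb_binding_usage_sketch(GLuint xfb_buf)
{
   /* The offset must be 4-byte aligned, matching the assert on 'start'. */
   glBindBufferRange(GL_TRANSFORM_FEEDBACK_BUFFER, 0, xfb_buf, 0, 4096);
   glBeginTransformFeedback(GL_POINTS);
}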
Example #10
static bool
intel_set_texture_storage_for_buffer_object(struct gl_context *ctx,
                                            struct gl_texture_object *tex_obj,
                                            struct gl_buffer_object *buffer_obj,
                                            uint32_t buffer_offset,
                                            uint32_t row_stride,
                                            bool read_only)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *intel_texobj = intel_texture_object(tex_obj);
   struct gl_texture_image *image = tex_obj->Image[0][0];
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_buffer_object *intel_buffer_obj = intel_buffer_object(buffer_obj);

   if (!read_only) {
      /* Renderbuffers have the restriction that the buffer offset and
       * surface pitch must be a multiple of the element size.  If it's
       * not, we have to fail and fall back to software.
       */
      int cpp = _mesa_get_format_bytes(image->TexFormat);
      if (buffer_offset % cpp || row_stride % cpp) {
         perf_debug("Bad PBO alignment; fallback to CPU mapping\n");
         return false;
      }

      if (!brw->format_supported_as_render_target[image->TexFormat]) {
         perf_debug("Non-renderable PBO format; fallback to CPU mapping\n");
         return false;
      }
   }

   assert(intel_texobj->mt == NULL);

   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_buffer_obj,
                                             buffer_offset,
                                             row_stride * image->Height);
   intel_texobj->mt =
      intel_miptree_create_for_bo(brw, bo,
                                  image->TexFormat,
                                  buffer_offset,
                                  image->Width, image->Height, image->Depth,
                                  row_stride,
                                  0);
   if (!intel_texobj->mt)
      return false;

   if (!_swrast_init_texture_image(image))
      return false;

   intel_miptree_reference(&intel_image->mt, intel_texobj->mt);

   /* The miptree is in a validated state, so no need to check later. */
   intel_texobj->needs_validate = false;
   intel_texobj->validated_first_level = 0;
   intel_texobj->validated_last_level = 0;
   intel_texobj->_Format = intel_texobj->mt->format;

   return true;
}
Example #11
static GLenum
intel_buffer_object_unpurgeable(struct gl_context * ctx,
                                struct gl_buffer_object *obj,
                                GLenum option)
{
   (void) ctx;
   (void) option;

   return intel_buffer_unpurgeable(intel_buffer_object(obj)->buffer);
}
Example #12
/* XXX: Do this for TexSubImage also:
 */
static GLboolean
try_pbo_upload(struct intel_context *intel,
               struct intel_texture_image *intelImage,
               const struct gl_pixelstore_attrib *unpack,
               GLint internalFormat,
               GLint width, GLint height,
               GLenum format, GLenum type, const void *pixels)
{
   struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
   GLuint src_offset, src_stride;
   GLuint dst_offset, dst_stride;

   if (!pbo ||
       intel->ctx._ImageTransferState ||
       unpack->SkipPixels || unpack->SkipRows) {
      _mesa_printf("%s: failure 1\n", __FUNCTION__);
      return GL_FALSE;
   }

   /* With a bound PBO, 'pixels' is an offset into the buffer, not a pointer. */
   src_offset = (GLuint) pixels;

   if (unpack->RowLength > 0)
      src_stride = unpack->RowLength;
   else
      src_stride = width;

   dst_offset = intel_miptree_image_offset(intelImage->mt,
                                           intelImage->face,
                                           intelImage->level);

   dst_stride = intelImage->mt->pitch;

   intelFlush(&intel->ctx);
   LOCK_HARDWARE(intel);
   {
      struct _DriBufferObject *src_buffer =
         intel_bufferobj_buffer(intel, pbo, INTEL_READ);
      struct _DriBufferObject *dst_buffer =
         intel_region_buffer(intel->intelScreen, intelImage->mt->region,
                             INTEL_WRITE_FULL);


      intelEmitCopyBlit(intel,
                        intelImage->mt->cpp,
                        src_stride, src_buffer, src_offset,
                        dst_stride, dst_buffer, dst_offset,
                        0, 0, 0, 0, width, height,
			GL_COPY);

      intel_batchbuffer_flush(intel->batch);
   }
   UNLOCK_HARDWARE(intel);

   return GL_TRUE;
}
Example #13
/**
 * Called via glMapBufferARB().
 */
static void *
intel_bufferobj_map(struct gl_context * ctx,
                    GLenum target,
                    GLenum access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   GLboolean read_only = (access == GL_READ_ONLY_ARB);
   GLboolean write_only = (access == GL_WRITE_ONLY_ARB);

   assert(intel_obj);

   if (intel_obj->sys_buffer) {
      if (!read_only && intel_obj->source) {
	 release_buffer(intel_obj);
      }

      if (!intel_obj->buffer || intel_obj->source) {
	 obj->Pointer = intel_obj->sys_buffer;
	 obj->Length = obj->Size;
	 obj->Offset = 0;
	 return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Flush any existing batchbuffer that might reference this data. */
   if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   if (write_only) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = GL_TRUE;
   } else {
      drm_intel_bo_map(intel_obj->buffer, !read_only);
      intel_obj->mapped_gtt = GL_FALSE;
   }

   obj->Pointer = intel_obj->buffer->virtual;
   obj->Length = obj->Size;
   obj->Offset = 0;

   return obj->Pointer;
}
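A sketch of the two mapping strategies above as seen from the application (vbo is illustrative):

static void map_access_usage_sketch(GLuint vbo)
{
   void *p;

   glBindBuffer(GL_ARRAY_BUFFER, vbo);

   /* GL_WRITE_ONLY takes the GTT-mapping path (write-combined, so no
    * CPU-cache flushing is needed afterwards).
    */
   p = glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
   memset(p, 0, 4096);
   glUnmapBuffer(GL_ARRAY_BUFFER);

   /* Any access that may read takes the cached drm_intel_bo_map() path. */
   p = glMapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
   glUnmapBuffer(GL_ARRAY_BUFFER);
}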
Example #14
static void
upload_3dstate_so_buffers(struct brw_context *brw)
{
    struct gl_context *ctx = &brw->ctx;
    /* BRW_NEW_VERTEX_PROGRAM */
    const struct gl_shader_program *vs_prog =
            ctx->Shader.CurrentVertexProgram;
    const struct gl_transform_feedback_info *linked_xfb_info =
            &vs_prog->LinkedTransformFeedback;
    /* BRW_NEW_TRANSFORM_FEEDBACK */
    struct gl_transform_feedback_object *xfb_obj =
            ctx->TransformFeedback.CurrentObject;
    int i;

    /* Set up the (up to) four output buffers.  These are the ranges defined
     * in the gl_transform_feedback_object.
     */
    for (i = 0; i < 4; i++) {
        struct intel_buffer_object *bufferobj =
            intel_buffer_object(xfb_obj->Buffers[i]);
        drm_intel_bo *bo;
        uint32_t start, end;
        uint32_t stride;

        if (!xfb_obj->Buffers[i]) {
            /* The pitch of 0 in this command indicates that the buffer is
             * unbound and won't be written to.
             */
            BEGIN_BATCH(4);
            OUT_BATCH(_3DSTATE_SO_BUFFER << 16 | (4 - 2));
            OUT_BATCH((i << SO_BUFFER_INDEX_SHIFT));
            OUT_BATCH(0);
            OUT_BATCH(0);
            ADVANCE_BATCH();

            continue;
        }

        bo = intel_bufferobj_buffer(brw, bufferobj, INTEL_WRITE_PART);
        stride = linked_xfb_info->BufferStride[i] * 4;

        start = xfb_obj->Offset[i];
        assert(start % 4 == 0);
        end = ALIGN(start + xfb_obj->Size[i], 4);
        assert(end <= bo->size);

        BEGIN_BATCH(4);
        OUT_BATCH(_3DSTATE_SO_BUFFER << 16 | (4 - 2));
        OUT_BATCH((i << SO_BUFFER_INDEX_SHIFT) | stride);
        OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, start);
        OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, end);
        ADVANCE_BATCH();
    }
}
/**
 * Called via glUnmapBuffer().
 */
static GLboolean
intel_bufferobj_unmap(GLcontext * ctx,
                      GLenum target, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   assert(obj->Pointer);
   if (intel_obj->sys_buffer != NULL) {
      /* always keep the mapping around. */
   } else if (intel_obj->range_map_buffer != NULL) {
      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel->batch);
      free(intel_obj->range_map_buffer);
      intel_obj->range_map_buffer = NULL;
   } else if (intel_obj->range_map_bo != NULL) {
      if (intel_obj->mapped_gtt) {
	 drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
      } else {
	 drm_intel_bo_unmap(intel_obj->range_map_bo);
      }

      intel_emit_linear_blit(intel,
			     intel_obj->buffer, obj->Offset,
			     intel_obj->range_map_bo, 0,
			     obj->Length);

      /* Since we've emitted some blits to buffers that will (likely) be used
       * in rendering operations in other cache domains in this batch, emit a
       * flush.  Once again, we wish for a domain tracker in libdrm to cover
       * usage inside of a batchbuffer.
       */
      intel_batchbuffer_emit_mi_flush(intel->batch);

      drm_intel_bo_unreference(intel_obj->range_map_bo);
      intel_obj->range_map_bo = NULL;
   } else if (intel_obj->buffer != NULL) {
      if (intel_obj->mapped_gtt) {
	 drm_intel_gem_bo_unmap_gtt(intel_obj->buffer);
      } else {
	 drm_intel_bo_unmap(intel_obj->buffer);
      }
   }
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;

   return GL_TRUE;
}
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return true for success, false if out of memory
 */
static GLboolean
intel_bufferobj_data(struct gl_context * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   /* Part of the ABI, but this function doesn't use it.
    */
#ifndef I915
   (void) target;
#endif

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->buffer != NULL)
      release_buffer(intel_obj);

   free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
      if (usage == GL_DYNAMIC_DRAW
#ifdef I915
	  /* On pre-965, stick VBOs in system memory, as we're always doing
	   * swtnl with their contents anyway.
	   */
	  || target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER
#endif
	 )
      {
	 intel_obj->sys_buffer = malloc(size);
	 if (intel_obj->sys_buffer != NULL) {
	    if (data != NULL)
	       memcpy(intel_obj->sys_buffer, data, size);
	    return true;
	 }
      }
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return false;

      if (data != NULL)
	 drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return true;
}
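A sketch of the allocation call as the application sees it; with GL_DYNAMIC_DRAW this implementation keeps the data in the malloc'ed sys_buffer shadow until a real BO is needed (vbo and size are illustrative):

static void dynamic_vbo_usage_sketch(GLuint vbo, GLsizeiptr size)
{
   glBindBuffer(GL_ARRAY_BUFFER, vbo);

   /* NULL data is allowed: storage is allocated but nothing is copied. */
   glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW);
}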
int brw_prepare_indices( struct brw_context *brw,
			 const struct _mesa_index_buffer *index_buffer,
			 dri_bo **bo_return,
			 GLuint *offset_return)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   GLuint ib_size = get_size(index_buffer->type) * index_buffer->count;
   dri_bo *bo;
   struct gl_buffer_object *bufferobj = index_buffer->obj;
   /* If a VBO is bound, index_buffer->ptr is an offset into it. */
   GLuint offset = (GLuint) index_buffer->ptr;
   int ret;

   /* Turn into a proper VBO:
    */
   if (!bufferobj->Name) {
     
      /* Get new bufferobj, offset:
       */
      get_space(brw, ib_size, &bo, &offset);

      /* Straight upload
       */
      dri_bo_subdata(bo, offset, ib_size, index_buffer->ptr);
   } else {
      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;

         get_space(brw, ib_size, &bo, &offset);

         dri_bo_subdata(bo, offset, ib_size, map);

         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
      } else {
         bo = intel_bufferobj_buffer(intel, intel_buffer_object(bufferobj),
                                     INTEL_READ);
         dri_bo_reference(bo);
      }
   }

   *bo_return = bo;
   *offset_return = offset;
   ret = dri_bufmgr_check_aperture_space(bo);
   return ret;
}
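For context, a sketch of a draw call that hits the rebase path above (ibo is illustrative):

static void misaligned_index_usage_sketch(GLuint ibo)
{
   glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);

   /* A 4-byte index type at a 2-byte offset into the bound VBO: the
    * offset is not a multiple of the element size, so the indices get
    * rebased into a temporary BO.
    */
   glDrawElements(GL_TRIANGLES, 30, GL_UNSIGNED_INT, (const GLvoid *) 2);
}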
/**
 * Called via glGetBufferSubDataARB().
 */
static void intel_bufferobj_get_subdata( GLcontext *ctx,
        GLenum target,
        GLintptrARB offset,
        GLsizeiptrARB size,
        GLvoid * data,
        struct gl_buffer_object * obj )
{
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

    assert(intel_obj);
    bmBufferGetSubData(intel->bm, intel_obj->buffer, offset, size, data);
}
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void intel_bufferobj_free( GLcontext *ctx,
                                  struct gl_buffer_object *obj )
{
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

    assert(intel_obj);

    if (intel_obj->buffer)
        bmDeleteBuffers( intel->bm, 1, &intel_obj->buffer );

    _mesa_free(intel_obj);
}
/**
 * Called via glUnmapBufferARB().
 */
static GLboolean intel_bufferobj_unmap( GLcontext *ctx,
                                        GLenum target,
                                        struct gl_buffer_object *obj )
{
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

    assert(intel_obj);
    assert(obj->Pointer);
    bmUnmapBuffer(intel->bm, intel_obj->buffer);
    obj->Pointer = NULL;
    return GL_TRUE;
}
/**
 * Called via glMapBufferARB().
 */
static void *intel_bufferobj_map( GLcontext *ctx,
                                  GLenum target,
                                  GLenum access,
                                  struct gl_buffer_object *obj )
{
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

    /* XXX: Translate access to flags arg below:
     */
    assert(intel_obj);
    obj->Pointer = bmMapBuffer(intel->bm, intel_obj->buffer, 0);
    return obj->Pointer;
}
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
intel_bufferobj_data(GLcontext * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   if (intel_obj->buffer != NULL) {
      dri_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = NULL;
   }
   _mesa_free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
#ifdef I915
      /* On pre-965, stick VBOs in system memory, as we're always doing swtnl
       * with their contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
	 intel_obj->sys_buffer = _mesa_malloc(size);
	 if (intel_obj->sys_buffer != NULL) {
	    if (data != NULL)
	       memcpy(intel_obj->sys_buffer, data, size);
	    return GL_TRUE;
	 }
      }
#endif
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return GL_FALSE;

      if (data != NULL)
	 dri_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return GL_TRUE;
}
/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(GLcontext * ctx,
                            GLenum target,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else
      dri_bo_get_subdata(intel_obj->buffer, offset, size, data);
}
Example #24
/**
 * The DeleteBuffer() driver hook.
 *
 * Deletes a single OpenGL buffer object.  Used by glDeleteBuffers().
 */
static void
brw_delete_buffer(struct gl_context * ctx, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleted according
    * to the spec, but Mesa doesn't call UnmapBuffer for us at context
    * destruction (though it does if you call glDeleteBuffers).
    */
   _mesa_buffer_unmap_all_mappings(ctx, obj);

   drm_intel_bo_unreference(intel_obj->buffer);
   free(intel_obj);
}
Example #25
static GLenum
intel_buffer_object_purgeable(struct gl_context * ctx,
                              struct gl_buffer_object *obj,
                              GLenum option)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   if (intel_obj->buffer != NULL)
      return intel_buffer_purgeable(intel_obj->buffer);

   if (option == GL_RELEASED_APPLE) {
      return GL_RELEASED_APPLE;
   } else {
      /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
      return intel_buffer_purgeable(intel_obj->buffer);
   }
}
Example #26
/**
 * Called via glGetBufferSubDataARB().
 */
static void
intel_bufferobj_get_subdata(struct gl_context * ctx,
                            GLintptrARB offset,
                            GLsizeiptrARB size,
                            GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   struct intel_context *intel = intel_context(ctx);

   assert(intel_obj);
   if (intel_obj->sys_buffer)
      memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
   else {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
	 intel_batchbuffer_flush(intel);
      }
      drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
   }
}
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via glBufferDataARB().
 */
static void intel_bufferobj_data( GLcontext *ctx,
                                  GLenum target,
                                  GLsizeiptrARB size,
                                  const GLvoid *data,
                                  GLenum usage,
                                  struct gl_buffer_object *obj )
{
    struct intel_context *intel = intel_context(ctx);
    struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

    /* XXX: do something useful with 'usage' (e.g. populate the flags
     * argument below).
     */
    assert(intel_obj);

    obj->Size = size;
    obj->Usage = usage;

    bmBufferData(intel->bm, intel_obj->buffer, size, data, 0);
}
static GLboolean
try_pbo_zcopy(struct intel_context *intel,
              struct intel_texture_image *intelImage,
              const struct gl_pixelstore_attrib *unpack,
              GLint internalFormat,
              GLint width, GLint height,
              GLenum format, GLenum type, const void *pixels)
{
   struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
   GLuint src_offset, src_stride;
   GLuint dst_offset, dst_stride;

   if (!pbo ||
       intel->ctx._ImageTransferState ||
       unpack->SkipPixels || unpack->SkipRows) {
      _mesa_printf("%s: failure 1\n", __FUNCTION__);
      return GL_FALSE;
   }

   /* With a bound PBO, 'pixels' is an offset into the buffer, not a pointer. */
   src_offset = (GLuint) pixels;

   if (unpack->RowLength > 0)
      src_stride = unpack->RowLength;
   else
      src_stride = width;

   dst_offset = intel_miptree_image_offset(intelImage->mt,
                                           intelImage->face,
                                           intelImage->level);

   dst_stride = intelImage->mt->pitch;

   if (src_stride != dst_stride || dst_offset != 0 || src_offset != 0) {
      _mesa_printf("%s: failure 2\n", __FUNCTION__);
      return GL_FALSE;
   }

   intel_region_attach_pbo(intel->intelScreen, intelImage->mt->region, pbo);

   return GL_TRUE;
}
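A sketch of a PBO texture upload that can satisfy the zero-copy conditions checked above, assuming a zero offset and matching strides (pbo and the dimensions are illustrative):

static void pbo_teximage_usage_sketch(GLuint pbo, GLsizei width, GLsizei height,
                                      const void *sysmem_pixels)
{
   glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
   glBufferData(GL_PIXEL_UNPACK_BUFFER, width * height * 4, sysmem_pixels,
                GL_STREAM_DRAW);

   /* With a PBO bound, the 'pixels' argument is an offset into the
    * buffer, not a pointer.  Offset 0 with a row stride equal to the
    * miptree pitch is what try_pbo_zcopy() requires.
    */
   glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0,
                GL_BGRA, GL_UNSIGNED_BYTE, (const GLvoid *) 0);
}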
/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(GLcontext * ctx,
                        GLenum target,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   if (size == 0)
      return;

   assert(intel_obj);

   if (intel_obj->region)
      intel_bufferobj_cow(intel, intel_obj);

   if (intel_obj->sys_buffer)
      memcpy((char *)intel_obj->sys_buffer + offset, data, size);
   else {
      /* Flush any existing batchbuffer that might reference this data. */
      if (drm_intel_bo_busy(intel_obj->buffer) ||
	  drm_intel_bo_references(intel->batch->buf, intel_obj->buffer)) {
	 drm_intel_bo *temp_bo;

	 temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

	 drm_intel_bo_subdata(temp_bo, 0, size, data);

	 intel_emit_linear_blit(intel,
				intel_obj->buffer, offset,
				temp_bo, 0,
				size);

	 drm_intel_bo_unreference(temp_bo);
      } else {
	 drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
      }
   }
}
/**
 * The BufferSubData() driver hook.
 *
 * Implements glBufferSubData(), which replaces a portion of the data in a
 * buffer object.
 *
 * If the data range specified by (size + offset) extends beyond the end of
 * the buffer or if data is NULL, no copy is performed.
 */
static void
brw_buffer_subdata(struct gl_context *ctx,
                   GLintptrARB offset,
                   GLsizeiptrARB size,
                   const GLvoid *data,
                   struct gl_buffer_object *obj)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* See if we can do an unsynchronized write of the data into the user's
    * BO.  This avoids GPU stalls in unfortunately common usage patterns
    * (uploading sequentially into a BO, with draw calls in between each
    * upload).
    *
    * Once we've hit this path, we mark this GL BO as preferring stalling to
    * blits, so that we can hopefully hit this path again in the future
    * (otherwise, an app that might occasionally stall but mostly not will
    * end up blitting all the time, at the cost of bandwidth).
    */
   if (brw->has_llc) {
      if (offset + size <= intel_obj->gpu_active_start ||
          intel_obj->gpu_active_end <= offset) {
         drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
         memcpy((char *) intel_obj->buffer->virtual + offset, data, size);
         drm_intel_bo_unmap(intel_obj->buffer);

         if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
            intel_obj->prefer_stall_to_blit = true;
         return;
      }
   }