static void *
nouveau_bufferobj_map_range(GLcontext *ctx, GLenum target, GLintptr offset,
			    GLsizeiptr length, GLenum access,
			    struct gl_buffer_object *obj)
{
	struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);
	uint32_t flags = 0;

	assert(!obj->Pointer);

	if (!nbo->bo)
		return NULL;

	if (access == GL_READ_ONLY_ARB ||
	    access == GL_READ_WRITE_ARB)
		flags |= NOUVEAU_BO_RD;
	if (access == GL_WRITE_ONLY_ARB ||
	    access == GL_READ_WRITE_ARB)
		flags |= NOUVEAU_BO_WR;

	if (nouveau_bo_map_range(nbo->bo, offset, length, flags))
		return NULL;

	obj->Pointer = nbo->bo->map;
	obj->Offset = offset;
	obj->Length = length;
	obj->AccessFlags = access;

	return obj->Pointer;
}
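For context, the matching unmap hook would drop the kernel mapping and clear the bookkeeping set up above. A minimal sketch assuming the same nouveau_bufferobj wrapper; this is illustrative, not the driver's verbatim code:

static GLboolean
nouveau_bufferobj_unmap_sketch(GLcontext *ctx, GLenum target,
			       struct gl_buffer_object *obj)
{
	struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);

	assert(obj->Pointer);

	nouveau_bo_unmap(nbo->bo);

	/* Clear the mapping state recorded by map_range above. */
	obj->Pointer = NULL;
	obj->Offset = 0;
	obj->Length = 0;
	obj->AccessFlags = 0;

	return GL_TRUE;
}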
Example #2
/* Maybe just migrate to GART right away if we actually need to do this. */
boolean
nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf,
                        unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   assert(buf->domain == NOUVEAU_BO_VRAM);

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size);

   if (nouveau_bo_map_range(bounce, offset, size, NOUVEAU_BO_RD)) {
      nouveau_bo_ref(NULL, &bounce);
      if (mm)
         nouveau_mm_free(mm);
      return FALSE;
   }
   memcpy(buf->data + start, bounce->map, size);
   nouveau_bo_unmap(bounce);

   buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      nouveau_mm_free(mm);
   return TRUE;
}
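A hedged sketch of the call site a download like this implies: read back only when the buffer lives in VRAM and the GPU copy may be newer than the CPU shadow. The helper name is hypothetical; the flag and fields are the ones used above.

static boolean
buffer_read_prepare(struct nouveau_context *nv, struct nv04_resource *buf,
                    unsigned start, unsigned size)
{
   /* Only VRAM-resident buffers with pending GPU writes need a readback. */
   if (buf->domain == NOUVEAU_BO_VRAM &&
       (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
      return nouveau_buffer_download(nv, buf, start, size);
   return TRUE;
}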
Example #3
/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map_range(buf->bo, buf->offset, size, NOUVEAU_BO_WR |
                                 NOUVEAU_BO_NOSYNC);
      if (ret)
         return FALSE;
      memcpy(buf->bo->map, buf->data, size);
      nouveau_bo_unmap(buf->bo);
      FREE(buf->data);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(buf, buf->bo, buf->offset, size))
            return FALSE;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      if (!nouveau_buffer_allocate(screen, buf, new_domain)) {
         /* Restore the saved bo/mm so the buffer stays usable in its
          * old domain. */
         buf->bo = bo;
         buf->mm = mm;
         return FALSE;
      }

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      if (!nouveau_buffer_upload(nv, buf, 0, buf->base.width0))
         return FALSE;
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}
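One plausible trigger for the migration above, sketched on the assumption that the score bookkeeping implied by nouveau_buffer_adjust_score (see example #6 below) tracks how often a buffer is used; the threshold and helper are hypothetical.

static void
buffer_consider_vram(struct nouveau_context *nv, struct nv04_resource *buf)
{
   /* Promote heavily reused linear buffers from GART to VRAM. */
   if (buf->domain == NOUVEAU_BO_GART && buf->score > 1024)
      nouveau_buffer_migrate(nv, buf, NOUVEAU_BO_VRAM);
}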
Example #4
void *
nouveau_screen_bo_map_range(struct pipe_screen *pscreen, struct nouveau_bo *bo,
			    unsigned offset, unsigned length, unsigned flags)
{
	int ret;

	ret = nouveau_bo_map_range(bo, offset, length, flags);
	if (ret) {
		nouveau_bo_unmap(bo);
		if (!(flags & NOUVEAU_BO_NOWAIT) || ret != -EBUSY)
			debug_printf("map_range failed: %d\n", ret);
		return NULL;
	}

	return (char *)bo->map - offset; /* why gallium? why? */
}
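Because the return value is rebased to the start of the buffer object, callers index it with the same absolute offset they passed in. A sketch of such a caller; the helper is hypothetical and nouveau_screen_bo_unmap is assumed to be the matching unmap entry point.

static boolean
screen_bo_read(struct pipe_screen *pscreen, struct nouveau_bo *bo,
	       unsigned offset, unsigned length, void *dst)
{
	char *base = nouveau_screen_bo_map_range(pscreen, bo, offset, length,
						 NOUVEAU_BO_RD);
	if (!base)
		return FALSE;
	/* The map was rebased, so add the absolute offset back in. */
	memcpy(dst, base + offset, length);
	nouveau_screen_bo_unmap(pscreen, bo);
	return TRUE;
}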
Example #5
/* Like download, but for GART buffers. Merge ? */
static INLINE boolean
nouveau_buffer_data_fetch(struct nv04_resource *buf, struct nouveau_bo *bo,
                          unsigned offset, unsigned size)
{
   if (!buf->data) {
      buf->data = MALLOC(size);
      if (!buf->data)
         return FALSE;
   }
   if (nouveau_bo_map_range(bo, offset, size, NOUVEAU_BO_RD))
      return FALSE;
   memcpy(buf->data, bo->map, size);
   nouveau_bo_unmap(bo);

   return TRUE;
}
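The merge the comment wonders about could plausibly be a single entry point that bounces through GART only when the source lives in VRAM. A hypothetical, full-buffer-only sketch (data_fetch writes to the start of buf->data, so partial ranges would need the offset handling download already does):

static boolean
buffer_fetch_cpu_copy(struct nouveau_context *nv, struct nv04_resource *buf)
{
   const unsigned size = buf->base.width0;

   if (buf->domain == NOUVEAU_BO_VRAM)
      return nouveau_buffer_download(nv, buf, 0, size);
   return nouveau_buffer_data_fetch(buf, buf->bo, buf->offset, size);
}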
Example #6
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = buf->bo;
   uint8_t *map;
   int ret;
   uint32_t offset = xfr->base.box.x;
   uint32_t flags;

   nouveau_buffer_adjust_score(nouveau_context(pipe), buf, -250);

   if (buf->domain != NOUVEAU_BO_GART)
      return buf->data + offset;

   if (buf->mm)
      flags = NOUVEAU_BO_NOSYNC | NOUVEAU_BO_RDWR;
   else
      flags = nouveau_screen_transfer_flags(xfr->base.usage);

   offset += buf->offset;

   ret = nouveau_bo_map_range(buf->bo, offset, xfr->base.box.width, flags);
   if (ret)
      return NULL;
   map = bo->map;

   /* Unmap right now. Since multiple buffers can share a single nouveau_bo,
    * not doing so might make future maps fail or trigger "reloc while mapped"
    * errors. For now, mappings to userspace are guaranteed to be persistent.
    */
   nouveau_bo_unmap(bo);

   if (buf->mm) {
      if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
         if (nouveau_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
            return NULL;
      } else
      if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         nouveau_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
      }
   }
   return map;
}
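Since the bo is unmapped inside transfer_map itself and the userspace mapping persists, the matching unmap hook is left with almost nothing to do. A sketch under that assumption:

static void
nouveau_buffer_transfer_unmap_sketch(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer)
{
   /* The bo was already unmapped in transfer_map and userspace mappings
    * persist, so no unmap call is needed here; a real hook might instead
    * record dirty ranges for buffers that keep a separate CPU shadow. */
   (void)pipe;
   (void)transfer;
}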
Example #7
/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to rebase
 * the vertex indices ...
 */
boolean
nouveau_user_buffer_upload(struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
   struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
   int ret;

   assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

   buf->base.width0 = base + size;
   if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return FALSE;

   ret = nouveau_bo_map_range(buf->bo, buf->offset + base, size,
                              NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
   if (ret)
      return FALSE;
   memcpy(buf->bo->map, buf->data + base, size);
   nouveau_bo_unmap(buf->bo);

   return TRUE;
}
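A hedged sketch of the draw-time call site this implies: size the upload by the highest vertex actually referenced, so indices keep working without being rebased. The helper and its parameters are illustrative.

static boolean
validate_user_vertex_buffer(struct nv04_resource *buf,
                            unsigned min_index, unsigned max_index,
                            unsigned stride)
{
   const unsigned base = min_index * stride;
   const unsigned size = (max_index - min_index + 1) * stride;

   if (!(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return TRUE;
   /* The bo spans [0, base + size), so untranslated vertex indices still
    * land on the right bytes even though only [base, base + size) is new. */
   return nouveau_user_buffer_upload(buf, base, size);
}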
Example #8
static boolean
nouveau_buffer_upload(struct nouveau_context *nv, struct nv04_resource *buf,
                      unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   /* Very small updates are pushed inline through the command stream
    * rather than staged through a GART bounce buffer. */
   if (size <= 192) {
      if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
         nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0,
                     start, size / 4, (const uint32_t *)(buf->data + start));
      else
         nv->push_data(nv, buf->bo, buf->offset + start, buf->domain,
                       size, buf->data + start);
      return TRUE;
   }

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   if (nouveau_bo_map_range(bounce, offset, size,
                            NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC)) {
      nouveau_bo_ref(NULL, &bounce);
      if (mm)
         release_allocation(&mm, nv->screen->fence.current);
      return FALSE;
   }
   memcpy(bounce->map, buf->data + start, size);
   nouveau_bo_unmap(bounce);

   nv->copy_data(nv, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
                 bounce, offset, NOUVEAU_BO_GART, size);

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      release_allocation(&mm, nv->screen->fence.current);

   if (start == 0 && size == buf->base.width0)
      buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
   return TRUE;
}
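A sketch of a write-through update built on top of this helper, for a VRAM-resident buffer: refresh the CPU shadow first, then mirror the same range into the GPU copy. The wrapper is hypothetical.

static boolean
buffer_write_through(struct nouveau_context *nv, struct nv04_resource *buf,
                     unsigned start, const void *data, unsigned size)
{
   /* Keep the CPU shadow authoritative, then push the dirty range. */
   memcpy(buf->data + start, data, size);
   return nouveau_buffer_upload(nv, buf, start, size);
}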