Example #1
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   boolean ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.bind &
       (screen->vidmem_bindings & screen->sysmem_bindings)) {
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
      case PIPE_USAGE_STATIC:
         buffer->domain = NOUVEAU_BO_VRAM;
         break;
      case PIPE_USAGE_DYNAMIC:
         /* For most apps, we'd have to do staging transfers to avoid sync
          * with this usage, and GART -> GART copies would be suboptimal.
          */
         buffer->domain = NOUVEAU_BO_VRAM;
         break;
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NOUVEAU_BO_VRAM;
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }
   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);

   if (ret == FALSE)
      goto fail;

   if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
      nouveau_buffer_cache(NULL, buffer);

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}
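In Gallium, a state tracker never calls this constructor directly; it reaches it through the screen's resource_create hook with a filled-in pipe_resource template, whose bind and usage fields drive the VRAM/GART placement logic above. A minimal sketch of such a caller follows; the helper name is made up for illustration and the headers listed are assumptions.

/* Hypothetical helper, not part of the driver, showing how a state tracker
 * reaches nouveau_buffer_create through pipe_screen::resource_create.
 * Assumes <string.h>, "pipe/p_defines.h", "pipe/p_format.h",
 * "pipe/p_screen.h" and "pipe/p_state.h".
 */
static struct pipe_resource *
example_create_vertex_buffer(struct pipe_screen *screen, unsigned size)
{
   struct pipe_resource templ;

   memset(&templ, 0, sizeof(templ));
   templ.target     = PIPE_BUFFER;
   templ.format     = PIPE_FORMAT_R8_UNORM;    /* buffers are byte-addressed */
   templ.bind       = PIPE_BIND_VERTEX_BUFFER; /* picks VRAM vs GART above */
   templ.usage      = PIPE_USAGE_DEFAULT;
   templ.width0     = size;                    /* size in bytes */
   templ.height0    = 1;
   templ.depth0     = 1;
   templ.array_size = 1;

   return screen->resource_create(screen, &templ);
}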
Example #2
void *
nouveau_resource_map_offset(struct nouveau_context *nv,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   if (unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

   if (res->domain == NOUVEAU_BO_VRAM) {
      if (!res->data || (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
         nouveau_buffer_cache(nv, res);
   }
   if (res->domain != NOUVEAU_BO_GART)
      return res->data + offset;

   if (res->mm) {
      unsigned rw;
      rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ;
      nouveau_buffer_sync(res, rw);
      if (nouveau_bo_map(res->bo, 0, NULL))
         return NULL;
   } else {
      if (nouveau_bo_map(res->bo, flags, nv->client))
         return NULL;
   }
   return (uint8_t *)res->bo->map + res->offset + offset;
}
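Callers inside the driver use this helper to get a CPU pointer at a byte offset into a resource, passing NOUVEAU_BO_RD or NOUVEAU_BO_WR to describe the access. A hedged sketch of such a caller; the helper name and the index-buffer scenario are illustrative, not taken from the driver.

/* Hypothetical caller, not taken from the driver: reads one index out of an
 * index buffer the way internal draw paths use nouveau_resource_map_offset.
 * NOUVEAU_BO_RD comes from libdrm's nouveau.h; any matching unmap is omitted
 * here for brevity. */
static uint32_t
example_peek_index(struct nouveau_context *nv, struct nv04_resource *res,
                   uint32_t byte_offset)
{
   const uint32_t *idx =
      nouveau_resource_map_offset(nv, res, byte_offset, NOUVEAU_BO_RD);

   if (!idx)
      return 0; /* mapping failed; a real caller would fall back or bail out */

   return *idx;
}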
Example #3
/* Returns a pointer to a memory area representing a window into the
 * resource's data.
 *
 * This may or may not be the _actual_ memory area of the resource. However,
 * when calling nouveau_buffer_transfer_unmap, if it wasn't the actual memory
 * area, the contents of the returned map are copied over to the resource.
 *
 * The usage indicates what the caller plans to do with the map:
 *
 *   WRITE means that the user plans to write to it
 *
 *   READ means that the user plans on reading from it
 *
 *   DISCARD_WHOLE_RESOURCE means that the whole resource is potentially going
 *   to be overwritten, and even the parts that aren't overwritten don't need
 *   to be preserved.
 *
 *   DISCARD_RANGE means that all the data in the specified range is going to
 *   be overwritten.
 *
 * The strategy for determining what kind of memory area to return is complex,
 * see comments inside of the function.
 */
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   uint8_t *map;
   int ret;

   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;

   if (usage & PIPE_TRANSFER_READ)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
   if (usage & PIPE_TRANSFER_WRITE)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);

   /* If we are trying to write to an uninitialized range, the user shouldn't
    * care what was there before. So we can treat the write as if the target
    * range were being discarded. Furthermore, even if this buffer is busy due
    * to GPU activity, the GPU cannot care about contents that were never
    * initialized, so we can also treat the write as unsynchronized.
    */
   if ((usage & PIPE_TRANSFER_WRITE) &&
       !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
      usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;

   if (usage & PIPE_TRANSFER_PERSISTENT)
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & NOUVEAU_TRANSFER_DISCARD) {
         /* Set up a staging area for the user to write to. It will be copied
          * back into VRAM on unmap. */
         if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
            buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
         nouveau_transfer_staging(nv, tx, TRUE);
      } else {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
            /* The GPU is currently writing to this buffer. Copy its current
             * contents to a staging area in the GART. This is necessary since
             * the area being mapped is not being discarded in its entirety.
             */
            if (buf->data) {
               align_free(buf->data);
               buf->data = NULL;
            }
            nouveau_transfer_staging(nv, tx, FALSE);
            nouveau_transfer_read(nv, tx);
         } else {
            /* The buffer is currently idle. Create a staging area for writes,
             * and make sure that the cached data is up-to-date. */
            if (usage & PIPE_TRANSFER_WRITE)
               nouveau_transfer_staging(nv, tx, TRUE);
            if (!buf->data)
               nouveau_buffer_cache(nv, buf);
         }
      }
      return buf->data ? (buf->data + box->x) : tx->map;
   } else
   if (unlikely(buf->domain == 0)) {
      return buf->data + box->x;
   }

   /* At this point, buf->domain == GART */

   if (nouveau_buffer_should_discard(buf, usage)) {
      int ref = buf->base.reference.count - 1;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }

   /* Note that nouveau_bo_map ends up doing a nouveau_bo_wait with the
    * relevant flags. If buf->mm is set, that means this resource is part of a
    * larger slab bo that holds multiple resources. So in that case, don't
    * wait on the whole slab and instead use the logic below to return a
    * reasonable buffer for that case.
    */
   ret = nouveau_bo_map(buf->bo,
                        buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
                        nv->client);
   if (ret) {
      FREE(tx);
      return NULL;
   }
   map = (uint8_t *)buf->bo->map + buf->offset + box->x;

   /* using kernel fences only if !buf->mm */
   if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
      return map;

   /* If the GPU is currently reading/writing this buffer, we shouldn't
    * interfere with its progress. So instead we either wait for the GPU to
    * complete its operation, or set up a staging area to perform our work in.
    */
   if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
      if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
         /* Discarding was not possible, must sync because
          * subsequent transfers might use UNSYNCHRONIZED. */
         nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         /* The whole range is being discarded, so it doesn't matter what was
          * there before. No need to copy anything over. */
         nouveau_transfer_staging(nv, tx, TRUE);
         map = tx->map;
      } else
      if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK)
            map = NULL;
         else
            nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else {
         /* It is expected that the returned buffer be a representation of the
          * data in question, so we must copy it over from the buffer. */
         nouveau_transfer_staging(nv, tx, TRUE);
         if (tx->map)
            memcpy(tx->map, map, box->width);
         map = tx->map;
      }
   }
   if (!map)
      FREE(tx);
   return map;
}
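From the state tracker's side this function is reached through pipe_context's transfer_map hook. A minimal sketch of an upload path that exercises the DISCARD_RANGE handling above, assuming the standard Gallium transfer interface and u_box_1d; the helper name is made up for illustration.

/* Hypothetical caller, not taken from the driver: uploads a range of bytes
 * through the generic Gallium transfer interface, which lands in
 * nouveau_buffer_transfer_map above. Assumes <string.h>, "pipe/p_context.h",
 * "pipe/p_defines.h" and "util/u_box.h". DISCARD_RANGE declares the old
 * contents of the range irrelevant, letting the driver take the staging or
 * unsynchronized paths described above. */
static void
example_upload_range(struct pipe_context *pipe, struct pipe_resource *buf,
                     unsigned offset, const void *data, unsigned size)
{
   struct pipe_transfer *transfer;
   struct pipe_box box;
   void *map;

   u_box_1d(offset, size, &box);
   map = pipe->transfer_map(pipe, buf, 0,
                            PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
                            &box, &transfer);
   if (!map)
      return;

   memcpy(map, data, size);
   pipe->transfer_unmap(pipe, transfer);
}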
Example #4
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   uint8_t *map;
   int ret;

   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & NOUVEAU_TRANSFER_DISCARD) {
         if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
            buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
         nouveau_transfer_staging(nv, tx, TRUE);
      } else {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
            if (buf->data) {
               align_free(buf->data);
               buf->data = NULL;
            }
            nouveau_transfer_staging(nv, tx, FALSE);
            nouveau_transfer_read(nv, tx);
         } else {
            if (usage & PIPE_TRANSFER_WRITE)
               nouveau_transfer_staging(nv, tx, TRUE);
            if (!buf->data)
               nouveau_buffer_cache(nv, buf);
         }
      }
      return buf->data ? (buf->data + box->x) : tx->map;
   } else
   if (unlikely(buf->domain == 0)) {
      return buf->data + box->x;
   }

   if (nouveau_buffer_should_discard(buf, usage)) {
      int ref = buf->base.reference.count - 1;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }

   ret = nouveau_bo_map(buf->bo,
                        buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
                        nv->client);
   if (ret) {
      FREE(tx);
      return NULL;
   }
   map = (uint8_t *)buf->bo->map + buf->offset + box->x;

   /* using kernel fences only if !buf->mm */
   if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
      return map;

   if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
      if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
         /* Discarding was not possible, must sync because
          * subsequent transfers might use UNSYNCHRONIZED. */
         nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         nouveau_transfer_staging(nv, tx, TRUE);
         map = tx->map;
      } else
      if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK)
            map = NULL;
         else
            nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else {
         nouveau_transfer_staging(nv, tx, TRUE);
         if (tx->map)
            memcpy(tx->map, map, box->width);
         map = tx->map;
      }
   }
   if (!map)
      FREE(tx);
   return map;
}
Example #5
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   bool ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                             PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
      buffer->domain = NOUVEAU_BO_GART;
   } else if (buffer->base.bind == 0 || (buffer->base.bind &
              (screen->vidmem_bindings & screen->sysmem_bindings))) {
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_DYNAMIC:
         /* For most apps, we'd have to do staging transfers to avoid sync
          * with this usage, and GART -> GART copies would be suboptimal.
          */
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NV_VRAM_DOMAIN(screen);
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }
   /* There can be very special situations where we want non-gpu-mapped
    * buffers, but never through this interface.
    */
   assert(buffer->domain);
   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);

   if (ret == false)
      goto fail;

   if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
      nouveau_buffer_cache(NULL, buffer);

   NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1);

   util_range_init(&buffer->valid_buffer_range);

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}
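The flag check at the top of this version routes buffers created with the persistent or coherent map flags to GART, where a long-lived CPU mapping is practical. A minimal sketch, assuming the standard Gallium screen hook and resource flags, of creating such a buffer; the helper name is illustrative.

/* Hypothetical helper, not part of the driver: creates a buffer with the
 * persistent/coherent map flags, which the flag check at the top of this
 * version forces into NOUVEAU_BO_GART. Assumes <string.h>,
 * "pipe/p_defines.h", "pipe/p_format.h", "pipe/p_screen.h" and
 * "pipe/p_state.h". */
static struct pipe_resource *
example_create_persistent_buffer(struct pipe_screen *screen, unsigned size)
{
   struct pipe_resource templ;

   memset(&templ, 0, sizeof(templ));
   templ.target     = PIPE_BUFFER;
   templ.format     = PIPE_FORMAT_R8_UNORM;
   templ.bind       = PIPE_BIND_VERTEX_BUFFER;
   templ.usage      = PIPE_USAGE_DYNAMIC;
   templ.flags      = PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                      PIPE_RESOURCE_FLAG_MAP_COHERENT;
   templ.width0     = size;
   templ.height0    = 1;
   templ.depth0     = 1;
   templ.array_size = 1;

   return screen->resource_create(screen, &templ);
}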