static struct pipe_stream_output_target *
nv50_so_target_create(struct pipe_context *pipe,
                      struct pipe_resource *res,
                      unsigned offset, unsigned size)
{
   struct nv04_resource *buf = (struct nv04_resource *)res;
   struct nv50_so_target *targ = MALLOC_STRUCT(nv50_so_target);
   if (!targ)
      return NULL;

   if (nouveau_context(pipe)->screen->class_3d >= NVA0_3D_CLASS) {
      targ->pq = pipe->create_query(pipe,
                                    NVA0_QUERY_STREAM_OUTPUT_BUFFER_OFFSET, 0);
      if (!targ->pq) {
         FREE(targ);
         return NULL;
      }
   } else {
      targ->pq = NULL;
   }
   targ->clean = TRUE;

   targ->pipe.buffer_size = size;
   targ->pipe.buffer_offset = offset;
   targ->pipe.context = pipe;
   targ->pipe.buffer = NULL;
   pipe_resource_reference(&targ->pipe.buffer, res);
   pipe_reference_init(&targ->pipe.reference, 1);

   assert(buf->base.target == PIPE_BUFFER);
   util_range_add(&buf->valid_buffer_range, offset, offset + size);

   return &targ->pipe;
}
Example #2
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) && tx->map)
         nouveau_transfer_write(nv, tx, 0, tx->base.box.width);

      if (likely(buf->domain)) {
         const uint8_t bind = buf->base.bind;
         /* make sure we invalidate dedicated caches */
         if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
            nv->vbo_dirty = TRUE;
         if (bind & (PIPE_BIND_CONSTANT_BUFFER))
            nv->cb_dirty = TRUE;
      }
   }

   nouveau_buffer_transfer_del(nv, tx);
   FREE(tx);
}
static struct pipe_transfer *
nouveau_buffer_transfer_get(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box)
{
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *xfr = CALLOC_STRUCT(nouveau_transfer);
   if (!xfr)
      return NULL;

   xfr->base.resource = resource;
   xfr->base.box.x = box->x;
   xfr->base.box.width = box->width;
   xfr->base.usage = usage;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & PIPE_TRANSFER_READ) {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING)
            nouveau_buffer_download(nv, buf, 0, buf->base.width0);
      }
   }

   return &xfr->base;
}
Example #4
/* Unmap stage of the transfer. If it was a WRITE transfer and the map that
 * was returned was not the real resource's data, this needs to transfer the
 * data back to the resource.
 *
 * Also marks vbo dirty based on the buffer's binding
 */
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) && tx->map)
         nouveau_transfer_write(nv, tx, 0, tx->base.box.width);

      if (likely(buf->domain)) {
         const uint8_t bind = buf->base.bind;
         /* make sure we invalidate dedicated caches */
         if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
            nv->vbo_dirty = TRUE;
      }

      util_range_add(&buf->valid_buffer_range,
                     tx->base.box.x, tx->base.box.x + tx->base.box.width);
   }

   if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);

   nouveau_buffer_transfer_del(nv, tx);
   FREE(tx);
}
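
From the state tracker's side, this unmap is reached through the generic gallium transfer hooks. As a hedged caller-side sketch (the function name write_buffer_example is illustrative; only the pipe_context hooks, the PIPE_TRANSFER_* flags and u_box_1d come from gallium), a map/write/unmap cycle might look like:

#include <stdint.h>
#include <string.h>

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_box.h"

/* Hedged caller-side sketch (not driver code): map 256 bytes of a buffer
 * for writing and unmap it again, so the driver flushes any staging copy
 * back as the unmap implementation above describes. Header paths assume
 * Mesa's gallium include layout. */
static void
write_buffer_example(struct pipe_context *pipe, struct pipe_resource *res)
{
   struct pipe_transfer *xfer;
   struct pipe_box box;
   uint8_t *map;

   u_box_1d(0, 256, &box); /* byte range [0, 256) of the buffer */

   map = pipe->transfer_map(pipe, res, 0 /* level */,
                            PIPE_TRANSFER_WRITE |
                            PIPE_TRANSFER_DISCARD_RANGE,
                            &box, &xfer);
   if (!map)
      return;

   memset(map, 0, 256);              /* write through the returned window */
   pipe->transfer_unmap(pipe, xfer); /* copies staging data back if needed */
}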
Example #5
/* Invalidate underlying buffer storage, reset fences, reallocate to non-busy
 * buffer.
 */
void
nouveau_buffer_invalidate(struct pipe_context *pipe,
                          struct pipe_resource *resource)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   int ref = buf->base.reference.count - 1;

   /* Shared buffers shouldn't get reallocated */
   if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
      return;

   /* We can't touch persistent/coherent buffers */
   if (buf->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                          PIPE_RESOURCE_FLAG_MAP_COHERENT))
      return;

   /* If the buffer is sub-allocated and not currently being written, just
    * wipe the valid buffer range. Otherwise we have to create fresh
    * storage. (We don't keep track of fences for non-sub-allocated BO's.)
    */
   if (buf->mm && !nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE)) {
      util_range_set_empty(&buf->valid_buffer_range);
   } else {
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }
}
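
Since nouveau_buffer_invalidate is non-static, it can be installed as the context's generic invalidate_resource hook; a minimal wiring sketch, assuming the standard pipe_context interface (the init function name below is made up for illustration):

#include "pipe/p_context.h"

/* Declaration matching the helper above. */
void nouveau_buffer_invalidate(struct pipe_context *pipe,
                               struct pipe_resource *resource);

/* Hedged sketch: exposing the helper above through pipe_context. Only
 * nouveau_buffer_invalidate comes from the example; the init function
 * name is illustrative. */
static void
example_init_resource_hooks(struct pipe_context *pipe)
{
   /* Lets the state tracker orphan a busy buffer (e.g. glBufferData with
    * NULL data) instead of stalling on pending GPU work. */
   pipe->invalidate_resource = nouveau_buffer_invalidate;
}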
Example #6
static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   if (tx->map)
      nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);
}
static void
nouveau_buffer_transfer_destroy(struct pipe_context *pipe,
                                struct pipe_transfer *transfer)
{
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nouveau_context *nv = nouveau_context(pipe);

   if (xfr->base.usage & PIPE_TRANSFER_WRITE) {
      if (buf->domain == NOUVEAU_BO_VRAM) {
         nouveau_buffer_upload(nv, buf, transfer->box.x, transfer->box.width);
      }

      if (buf->domain != 0 && (buf->base.bind & (PIPE_BIND_VERTEX_BUFFER |
                                                 PIPE_BIND_INDEX_BUFFER)))
         nouveau_context(pipe)->vbo_dirty = TRUE;
   }

   FREE(xfr);
}
Example #8
static void
nouveau_set_debug_callback(struct pipe_context *pipe,
                           const struct pipe_debug_callback *cb)
{
   struct nouveau_context *context = nouveau_context(pipe);

   if (cb)
      context->debug = *cb;
   else
      memset(&context->debug, 0, sizeof(context->debug));
}
Example #9
static void
nv50_flush(struct pipe_context *pipe,
           struct pipe_fence_handle **fence,
           unsigned flags)
{
   struct nouveau_screen *screen = nouveau_screen(pipe->screen);

   if (fence)
      nouveau_fence_ref(screen->fence.current, (struct nouveau_fence **)fence);

   PUSH_KICK(screen->pushbuf);

   nouveau_context_update_frame_stats(nouveau_context(pipe));
}
Example #10
static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->map)
      nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);

   util_range_add(&buf->valid_buffer_range,
                  tx->base.box.x + box->x,
                  tx->base.box.x + box->x + box->width);
}
Example #11
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = buf->bo;
   uint8_t *map;
   int ret;
   uint32_t offset = xfr->base.box.x;
   uint32_t flags;

   nouveau_buffer_adjust_score(nouveau_context(pipe), buf, -250);

   if (buf->domain != NOUVEAU_BO_GART)
      return buf->data + offset;

   if (buf->mm)
      flags = NOUVEAU_BO_NOSYNC | NOUVEAU_BO_RDWR;
   else
      flags = nouveau_screen_transfer_flags(xfr->base.usage);

   offset += buf->offset;

   ret = nouveau_bo_map_range(buf->bo, offset, xfr->base.box.width, flags);
   if (ret)
      return NULL;
   map = bo->map;

   /* Unmap right now. Since multiple buffers can share a single nouveau_bo,
    * not doing so might make future maps fail or trigger "reloc while mapped"
    * errors. For now, mappings to userspace are guaranteed to be persistent.
    */
   nouveau_bo_unmap(bo);

   if (buf->mm) {
      if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
         if (nouveau_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
            return NULL;
      } else
      if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         nouveau_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
      }
   }
   return map;
}
Example #12
static void *
nv50_sp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso, unsigned type)
{
   struct nv50_program *prog;

   prog = CALLOC_STRUCT(nv50_program);
   if (!prog)
      return NULL;

   prog->type = type;
   prog->pipe.tokens = tgsi_dup_tokens(cso->tokens);

   if (cso->stream_output.num_outputs)
      prog->pipe.stream_output = cso->stream_output;

   prog->translated = nv50_program_translate(
         prog, nv50_context(pipe)->screen->base.device->chipset,
         &nouveau_context(pipe)->debug);

   return (void *)prog;
}
Example #13
static void *
nvc0_cp_state_create(struct pipe_context *pipe,
                     const struct pipe_compute_state *cso)
{
   struct nvc0_program *prog;

   prog = CALLOC_STRUCT(nvc0_program);
   if (!prog)
      return NULL;
   prog->type = PIPE_SHADER_COMPUTE;

   prog->cp.smem_size = cso->req_local_mem;
   prog->cp.lmem_size = cso->req_private_mem;
   prog->parm_size = cso->req_input_mem;

   prog->pipe.tokens = tgsi_dup_tokens((const struct tgsi_token *)cso->prog);

   prog->translated = nvc0_program_translate(
      prog, nvc0_context(pipe)->screen->base.device->chipset,
      &nouveau_context(pipe)->debug);

   return (void *)prog;
}
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *xfr = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);
   struct nouveau_bo *bo = buf->bo;
   uint8_t *map;
   int ret;
   uint32_t offset = xfr->base.box.x;
   uint32_t flags = 0;

   if (buf->domain != NOUVEAU_BO_GART)
      return buf->data + offset;

   if (!buf->mm)
      flags = nouveau_screen_transfer_flags(xfr->base.usage);

   offset += buf->offset;

   ret = nouveau_bo_map(buf->bo, flags, nv->screen->client);
   if (ret)
      return NULL;
   map = (uint8_t *)bo->map + offset;

   if (buf->mm) {
      if (xfr->base.usage & PIPE_TRANSFER_DONTBLOCK) {
         if (nouveau_buffer_busy(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE))
            return NULL;
      } else
      if (!(xfr->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
         nouveau_buffer_sync(buf, xfr->base.usage & PIPE_TRANSFER_READ_WRITE);
      }
   }
   return map;
}
Example #15
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   uint8_t *map;
   int ret;

   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & NOUVEAU_TRANSFER_DISCARD) {
         if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
            buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
         nouveau_transfer_staging(nv, tx, TRUE);
      } else {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
            if (buf->data) {
               align_free(buf->data);
               buf->data = NULL;
            }
            nouveau_transfer_staging(nv, tx, FALSE);
            nouveau_transfer_read(nv, tx);
         } else {
            if (usage & PIPE_TRANSFER_WRITE)
               nouveau_transfer_staging(nv, tx, TRUE);
            if (!buf->data)
               nouveau_buffer_cache(nv, buf);
         }
      }
      return buf->data ? (buf->data + box->x) : tx->map;
   } else
   if (unlikely(buf->domain == 0)) {
      return buf->data + box->x;
   }

   if (nouveau_buffer_should_discard(buf, usage)) {
      int ref = buf->base.reference.count - 1;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }

   ret = nouveau_bo_map(buf->bo,
                        buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
                        nv->client);
   if (ret) {
      FREE(tx);
      return NULL;
   }
   map = (uint8_t *)buf->bo->map + buf->offset + box->x;

   /* using kernel fences only if !buf->mm */
   if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
      return map;

   if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
      if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
         /* Discarding was not possible, must sync because
          * subsequent transfers might use UNSYNCHRONIZED. */
         nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         nouveau_transfer_staging(nv, tx, TRUE);
         map = tx->map;
      } else
      if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK)
            map = NULL;
         else
            nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else {
         nouveau_transfer_staging(nv, tx, TRUE);
         if (tx->map)
            memcpy(tx->map, map, box->width);
         map = tx->map;
      }
   }
   if (!map)
      FREE(tx);
   return map;
}
Example #16
/* Returns a pointer to a memory area representing a window into the
 * resource's data.
 *
 * This may or may not be the _actual_ memory area of the resource. However,
 * when calling nouveau_buffer_transfer_unmap, if it wasn't the actual memory
 * area, the contents of the returned map are copied over to the resource.
 *
 * The usage indicates what the caller plans to do with the map:
 *
 *   WRITE means that the user plans to write to it
 *
 *   READ means that the user plans to read from it
 *
 *   DISCARD_WHOLE_RESOURCE means that the whole resource may be overwritten,
 *   and any parts that are not overwritten do not need to be preserved.
 *
 *   DISCARD_RANGE means that all the data in the specified range is going to
 *   be overwritten.
 *
 * The strategy for determining what kind of memory area to return is complex,
 * see comments inside of the function.
 */
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   uint8_t *map;
   int ret;

   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;

   if (usage & PIPE_TRANSFER_READ)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
   if (usage & PIPE_TRANSFER_WRITE)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);

   /* If we are trying to write to an uninitialized range, the user shouldn't
    * care what was there before. So we can treat the write as if the target
    * range were being discarded. Furthermore, since we know that even if this
    * buffer is busy due to GPU activity, because the contents were
    * uninitialized, the GPU can't care what was there, and so we can treat
    * the write as being unsynchronized.
    */
   if ((usage & PIPE_TRANSFER_WRITE) &&
       !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
      usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;

   if (usage & PIPE_TRANSFER_PERSISTENT)
      usage |= PIPE_TRANSFER_UNSYNCHRONIZED;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & NOUVEAU_TRANSFER_DISCARD) {
         /* Set up a staging area for the user to write to. It will be copied
          * back into VRAM on unmap. */
         if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
            buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
         nouveau_transfer_staging(nv, tx, TRUE);
      } else {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
            /* The GPU is currently writing to this buffer. Copy its current
             * contents to a staging area in the GART. This is necessary since
             * not the whole area being mapped is being discarded.
             */
            if (buf->data) {
               align_free(buf->data);
               buf->data = NULL;
            }
            nouveau_transfer_staging(nv, tx, FALSE);
            nouveau_transfer_read(nv, tx);
         } else {
            /* The buffer is currently idle. Create a staging area for writes,
             * and make sure that the cached data is up-to-date. */
            if (usage & PIPE_TRANSFER_WRITE)
               nouveau_transfer_staging(nv, tx, TRUE);
            if (!buf->data)
               nouveau_buffer_cache(nv, buf);
         }
      }
      return buf->data ? (buf->data + box->x) : tx->map;
   } else
   if (unlikely(buf->domain == 0)) {
      return buf->data + box->x;
   }

   /* At this point, buf->domain == GART */

   if (nouveau_buffer_should_discard(buf, usage)) {
      int ref = buf->base.reference.count - 1;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }

   /* Note that nouveau_bo_map ends up doing a nouveau_bo_wait with the
    * relevant flags. If buf->mm is set, that means this resource is part of a
    * larger slab bo that holds multiple resources. So in that case, don't
    * wait on the whole slab and instead use the logic below to return a
    * reasonable buffer for that case.
    */
   ret = nouveau_bo_map(buf->bo,
                        buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
                        nv->client);
   if (ret) {
      FREE(tx);
      return NULL;
   }
   map = (uint8_t *)buf->bo->map + buf->offset + box->x;

   /* using kernel fences only if !buf->mm */
   if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
      return map;

   /* If the GPU is currently reading/writing this buffer, we shouldn't
    * interfere with its progress. So instead we either wait for the GPU to
    * complete its operation, or set up a staging area to perform our work in.
    */
   if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
      if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
         /* Discarding was not possible, must sync because
          * subsequent transfers might use UNSYNCHRONIZED. */
         nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         /* The whole range is being discarded, so it doesn't matter what was
          * there before. No need to copy anything over. */
         nouveau_transfer_staging(nv, tx, TRUE);
         map = tx->map;
      } else
      if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK)
            map = NULL;
         else
            nouveau_buffer_sync(buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else {
         /* It is expected that the returned buffer be a representation of the
          * data in question, so we must copy it over from the buffer. */
         nouveau_transfer_staging(nv, tx, TRUE);
         if (tx->map)
            memcpy(tx->map, map, box->width);
         map = tx->map;
      }
   }
   if (!map)
      FREE(tx);
   return map;
}
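
The usage flags described in the comment above decide which of these paths the map takes. A hedged caller-side illustration (map_usage_example is not from the driver; the hooks, flags and u_box_1d helper are from the gallium interface, same headers as the earlier write_buffer_example sketch):

/* Hedged sketch: a whole-resource discard write and a plain read land on
 * different branches of the transfer_map implementation above. Assumes
 * `pipe` and `res` are a valid context/buffer pair. */
static void
map_usage_example(struct pipe_context *pipe, struct pipe_resource *res)
{
   struct pipe_transfer *xfer;
   struct pipe_box box;

   u_box_1d(0, res->width0, &box); /* the whole buffer */

   /* Whole-resource discard write: the driver may hand back a staging area
    * (or reallocate the storage) instead of waiting for the GPU. */
   void *w = pipe->transfer_map(pipe, res, 0,
                                PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                                &box, &xfer);
   if (w)
      pipe->transfer_unmap(pipe, xfer);

   /* Read map: may trigger a download or a sync so the returned window
    * reflects what the GPU has already written. */
   void *r = pipe->transfer_map(pipe, res, 0, PIPE_TRANSFER_READ, &box, &xfer);
   if (r)
      pipe->transfer_unmap(pipe, xfer);
}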