Example #1

static INLINE boolean
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = buf->base.width0;

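   /* Constant buffers must be padded to a 0x100-byte boundary. */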
   if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
      size = align(size, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
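      /* VRAM pool exhausted: transparently fall back to GART. */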
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return FALSE;
   }
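   /* VRAM buffers keep a CPU shadow copy; for domain 0 the malloc'd
    * memory is the only storage. */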
   if (domain != NOUVEAU_BO_GART) {
      if (!buf->data) {
         buf->data = MALLOC(buf->base.width0);
         if (!buf->data)
            return FALSE;
      }
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;

   return TRUE;
}
Example #2
static INLINE boolean
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = buf->base.width0;

   if (buf->base.bind & (PIPE_BIND_CONSTANT_BUFFER |
                         PIPE_BIND_COMPUTE_RESOURCE |
                         PIPE_BIND_SHADER_RESOURCE))
      size = align(size, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_vid, buf->base.width0);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return FALSE;
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
   } else {
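      /* domain 0: plain system memory, allocated with malloc. */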
      assert(domain == 0);
      if (!nouveau_buffer_malloc(buf))
         return FALSE;
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;

   return TRUE;
}
Example #3
static inline bool
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = align(buf->base.width0, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_vid, buf->base.width0);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return false;
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
   } else {
      assert(domain == 0);
      if (!nouveau_buffer_malloc(buf))
         return false;
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;

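   /* A fresh allocation contains no valid data yet. */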
   util_range_set_empty(&buf->valid_buffer_range);

   return true;
}
Example #4

static boolean
nv50_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size)
{
   struct nv50_screen *screen = nv50->screen;
   int ret;

   if (q->bo) {
      nouveau_bo_ref(NULL, &q->bo);
      if (q->mm) {
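         /* Free immediately if the result already landed, otherwise defer
          * the free behind the current fence: the GPU may still be writing. */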
         if (q->ready)
            nouveau_mm_free(q->mm);
         else
            nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work,
                               q->mm);
      }
   }
   if (size) {
      q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
      if (!q->bo)
         return FALSE;
      q->offset = q->base;

      ret = nouveau_bo_map(q->bo, 0, screen->base.client);
      if (ret) {
         nv50_query_allocate(nv50, q, 0);
         return FALSE;
      }
      q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base);
   }
   return TRUE;
}
Example #5

/* Maybe just migrate to GART right away if we actually need to do this. */
boolean
nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf,
                        unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   assert(buf->domain == NOUVEAU_BO_VRAM);

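   /* Read back through a GART bounce buffer: blit VRAM -> GART on the GPU,
    * then memcpy into the CPU shadow copy. */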
   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size);

   if (nouveau_bo_map(bounce, NOUVEAU_BO_RD, nv->screen->client))
      return FALSE;
   memcpy(buf->data + start, (uint8_t *)bounce->map + offset, size);

   buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      nouveau_mm_free(mm);
   return TRUE;
}
Example #6
static void
nv50_compute_upload_input(struct nv50_context *nv50, const uint32_t *input)
{
   struct nv50_screen *screen = nv50->screen;
   struct nouveau_pushbuf *push = screen->base.pushbuf;
   unsigned size = align(nv50->compprog->parm_size, 0x4);

   BEGIN_NV04(push, NV50_COMPUTE(USER_PARAM_COUNT), 1);
   PUSH_DATA (push, (size / 4) << 8);

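   /* Stage the user parameters in a temporary GART suballocation and
    * stream them into the push buffer. */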
   if (size) {
      struct nouveau_mm_allocation *mm;
      struct nouveau_bo *bo = NULL;
      unsigned offset;

      mm = nouveau_mm_allocate(screen->base.mm_GART, size, &bo, &offset);
      assert(mm);

      nouveau_bo_map(bo, 0, screen->base.client);
      memcpy(bo->map + offset, input, size);

      nouveau_bufctx_refn(nv50->bufctx, 0, bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
      nouveau_pushbuf_bufctx(push, nv50->bufctx);
      nouveau_pushbuf_validate(push);

      BEGIN_NV04(push, NV50_COMPUTE(USER_PARAM(0)), size / 4);
      nouveau_pushbuf_data(push, bo, offset, size);

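      /* The suballocation may be recycled only after the GPU has consumed
       * the data, so the free is deferred behind the current fence. */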
      nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work, mm);
      nouveau_bo_ref(NULL, &bo);
      nouveau_bufctx_reset(nv50->bufctx, 0);
   }
}
Example #7
static bool
nv50_hw_query_allocate(struct nv50_context *nv50, struct nv50_query *q,
                       int size)
{
   struct nv50_screen *screen = nv50->screen;
   struct nv50_hw_query *hq = nv50_hw_query(q);
   int ret;

   if (hq->bo) {
      nouveau_bo_ref(NULL, &hq->bo);
      if (hq->mm) {
         if (hq->state == NV50_HW_QUERY_STATE_READY)
            nouveau_mm_free(hq->mm);
         else
            nouveau_fence_work(screen->base.fence.current,
                               nouveau_mm_free_work, hq->mm);
      }
   }
   if (size) {
      hq->mm = nouveau_mm_allocate(screen->base.mm_GART, size,
                                   &hq->bo, &hq->base_offset);
      if (!hq->bo)
         return false;
      hq->offset = hq->base_offset;

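      /* Keep the BO mapped so query results can be read back directly. */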
      ret = nouveau_bo_map(hq->bo, 0, screen->base.client);
      if (ret) {
         nv50_hw_query_allocate(nv50, q, 0);
         return false;
      }
      hq->data = (uint32_t *)((uint8_t *)hq->bo->map + hq->base_offset);
   }
   return true;
}
Example #8
static uint8_t *
nouveau_transfer_staging(struct nouveau_context *nv,
                         struct nouveau_transfer *tx, boolean permit_pb)
{
   const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK;
   const unsigned size = align(tx->base.box.width, 4) + adj;

   if (!nv->push_data)
      permit_pb = FALSE;

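   /* Small transfers are pushed inline from malloc'd memory; larger ones
    * go through a GART bounce BO. */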
   if ((size <= NOUVEAU_TRANSFER_PUSHBUF_THRESHOLD) && permit_pb) {
      tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
      if (tx->map)
         tx->map += adj;
   } else {
      tx->mm =
         nouveau_mm_allocate(nv->screen->mm_GART, size, &tx->bo, &tx->offset);
      if (tx->bo) {
         tx->offset += adj;
         if (!nouveau_bo_map(tx->bo, 0, NULL))
            tx->map = (uint8_t *)tx->bo->map + tx->offset;
      }
   }
   return tx->map;
}
Example #9
static boolean
nouveau_buffer_upload(struct nouveau_context *nv, struct nv04_resource *buf,
                      unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

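   /* Tiny updates are written inline through the command stream. */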
   if (size <= 192) {
      if (buf->base.bind & PIPE_BIND_CONSTANT_BUFFER)
         nv->push_cb(nv, buf->bo, buf->domain, buf->offset, buf->base.width0,
                     start, size / 4, (const uint32_t *)(buf->data + start));
      else
         nv->push_data(nv, buf->bo, buf->offset + start, buf->domain,
                       size, buf->data + start);
      return TRUE;
   }

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nouveau_bo_map_range(bounce, offset, size,
                        NOUVEAU_BO_WR | NOUVEAU_BO_NOSYNC);
   memcpy(bounce->map, buf->data + start, size);
   nouveau_bo_unmap(bounce);

   nv->copy_data(nv, buf->bo, buf->offset + start, NOUVEAU_BO_VRAM,
                 bounce, offset, NOUVEAU_BO_GART, size);

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      release_allocation(&mm, nv->screen->fence.current);

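   /* A full-buffer upload overwrites any pending GPU writes. */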
   if (start == 0 && size == buf->base.width0)
      buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
   return TRUE;
}
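
The recurring pattern across these examples: suballocate from a per-screen
pool with nouveau_mm_allocate(), use the returned bo/offset pair for GPU
work, then release the suballocation only once the GPU is done with it,
either immediately or deferred behind the current fence. Below is a minimal
sketch of that pattern, using only the APIs shown above; use_gpu() is a
hypothetical stand-in for whatever work references the range:

static bool
scratch_with_deferred_free(struct nouveau_screen *screen, uint32_t size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bo = NULL;
   uint32_t offset;

   /* Suballocate from the shared GART pool instead of creating a new BO. */
   mm = nouveau_mm_allocate(screen->mm_GART, size, &bo, &offset);
   if (!bo)
      return false;

   use_gpu(bo, offset, size); /* hypothetical: queue GPU work on the range */

   /* The GPU may still reference the range, so free it only after the
    * current fence signals, as the examples above do. */
   nouveau_fence_work(screen->fence.current, nouveau_mm_free_work, mm);
   nouveau_bo_ref(NULL, &bo);
   return true;
}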