static INLINE void
release_allocation(struct nouveau_mm_allocation **mm,
                   struct nouveau_fence *fence)
{
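   /* Hand the suballocation to fence work so it is only freed once the
    * given fence has signalled, then clear the caller's pointer. */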
   nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
   (*mm) = NULL;
}

static boolean
nv50_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size)
{
   struct nv50_screen *screen = nv50->screen;
   int ret;

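   /* Drop any previous backing storage. If the query has not completed yet,
    * defer freeing the suballocation until the current fence passes. */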
   if (q->bo) {
      nouveau_bo_ref(NULL, &q->bo);
      if (q->mm) {
         if (q->ready)
            nouveau_mm_free(q->mm);
         else
            nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work,
                               q->mm);
      }
   }
   if (size) {
      q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
      if (!q->bo)
         return FALSE;
      q->offset = q->base;

      ret = nouveau_bo_map(q->bo, 0, screen->base.client);
      if (ret) {
         nv50_query_allocate(nv50, q, 0);
         return FALSE;
      }
      q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base);
   }
   return TRUE;
}
Example #3
static void
nv50_compute_upload_input(struct nv50_context *nv50, const uint32_t *input)
{
   struct nv50_screen *screen = nv50->screen;
   struct nouveau_pushbuf *push = screen->base.pushbuf;
   unsigned size = align(nv50->compprog->parm_size, 0x4);

   BEGIN_NV04(push, NV50_COMPUTE(USER_PARAM_COUNT), 1);
   PUSH_DATA (push, (size / 4) << 8);

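   /* Stage the user parameters in a temporary GART suballocation, reference
    * it for the pushbuf, and let fence work recycle it after the commands
    * have executed. */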
   if (size) {
      struct nouveau_mm_allocation *mm;
      struct nouveau_bo *bo = NULL;
      unsigned offset;

      mm = nouveau_mm_allocate(screen->base.mm_GART, size, &bo, &offset);
      assert(mm);

      nouveau_bo_map(bo, 0, screen->base.client);
      memcpy(bo->map + offset, input, size);

      nouveau_bufctx_refn(nv50->bufctx, 0, bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
      nouveau_pushbuf_bufctx(push, nv50->bufctx);
      nouveau_pushbuf_validate(push);

      BEGIN_NV04(push, NV50_COMPUTE(USER_PARAM(0)), size / 4);
      nouveau_pushbuf_data(push, bo, offset, size);

      nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work, mm);
      nouveau_bo_ref(NULL, &bo);
      nouveau_bufctx_reset(nv50->bufctx, 0);
   }
}
Example #4
static bool
nv50_hw_query_allocate(struct nv50_context *nv50, struct nv50_query *q,
                       int size)
{
   struct nv50_screen *screen = nv50->screen;
   struct nv50_hw_query *hq = nv50_hw_query(q);
   int ret;

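   /* Drop any previous backing storage. If the query result is not ready yet,
    * defer freeing the suballocation until the current fence passes. */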
   if (hq->bo) {
      nouveau_bo_ref(NULL, &hq->bo);
      if (hq->mm) {
         if (hq->state == NV50_HW_QUERY_STATE_READY)
            nouveau_mm_free(hq->mm);
         else
            nouveau_fence_work(screen->base.fence.current,
                               nouveau_mm_free_work, hq->mm);
      }
   }
   if (size) {
      hq->mm = nouveau_mm_allocate(screen->base.mm_GART, size,
                                   &hq->bo, &hq->base_offset);
      if (!hq->bo)
         return false;
      hq->offset = hq->base_offset;

      ret = nouveau_bo_map(hq->bo, 0, screen->base.client);
      if (ret) {
         nv50_hw_query_allocate(nv50, q, 0);
         return false;
      }
      hq->data = (uint32_t *)((uint8_t *)hq->bo->map + hq->base_offset);
   }
   return true;
}
Example #5
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del(&vma->head);

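	/* With more than one shared fence, wait for the bo to go idle here;
	 * otherwise pick the single shared or the exclusive fence so the unmap
	 * and cleanup can be deferred to fence work below. */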
	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, true, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence && mapped) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		kfree(vma);
	}
}
Example #6
void
nouveau_scratch_runout_release(struct nouveau_context *nv)
{
   if (!nv->scratch.runout)
      return;

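   /* If the deferred release could not be scheduled on the current fence,
    * keep the runout list so a later call can try again. */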
   if (!nouveau_fence_work(nv->screen->fence.current, nouveau_scratch_unref_bos,
         nv->scratch.runout))
      return;

   nv->scratch.end = 0;
   nv->scratch.runout = NULL;
}
Example #7
static inline void
nouveau_buffer_transfer_del(struct nouveau_context *nv,
                            struct nouveau_transfer *tx)
{
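   /* GPU staging (bo and suballocation) is released via fence work so any
    * pending copy can finish first; CPU-only staging is freed immediately. */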
   if (tx->map) {
      if (likely(tx->bo)) {
         nouveau_fence_work(nv->screen->fence.current,
                            nouveau_fence_unref_bo, tx->bo);
         if (tx->mm)
            release_allocation(&tx->mm, nv->screen->fence.current);
      } else {
         align_free(tx->map -
                    (tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK));
      }
   }
}
Example #8
inline void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
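   /* If the buffer's fence has not been flushed yet, drop the bo reference
    * from fence work; otherwise drop it right away. */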
   if (buf->fence && buf->fence->state < NOUVEAU_FENCE_STATE_FLUSHED) {
      nouveau_fence_work(buf->fence, nouveau_fence_unref_bo, buf->bo);
      buf->bo = NULL;
   } else {
      nouveau_bo_ref(NULL, &buf->bo);
   }

   if (buf->mm)
      release_allocation(&buf->mm, buf->fence);

   if (buf->domain == NOUVEAU_BO_VRAM)
      NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_vid, -(uint64_t)buf->base.width0);
   if (buf->domain == NOUVEAU_BO_GART)
      NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_sys, -(uint64_t)buf->base.width0);

   buf->domain = 0;
}
Example #9
void
nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *transfer)
{
   struct nvc0_context *nvc0 = nvc0_context(pctx);
   struct nvc0_transfer *tx = (struct nvc0_transfer *)transfer;
   struct nv50_miptree *mt = nv50_miptree(tx->base.resource);
   unsigned i;

   if (tx->base.usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      pipe_resource_reference(&transfer->resource, NULL);

      FREE(tx);
      return;
   }

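   /* Write mappings: copy the staging buffer back into the miptree one layer
    * at a time; the staging bo is released from fence work further down. */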
   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      for (i = 0; i < tx->nlayers; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &tx->rect[0], &tx->rect[1],
                              tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += tx->nblocksy * tx->base.stride;
      }
      NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_wr, 1);

      /* Allow the copies above to finish executing before freeing the source */
      nouveau_fence_work(nvc0->screen->base.fence.current,
                         nouveau_fence_unref_bo, tx->rect[1].bo);
   } else {
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
   }
   if (tx->base.usage & PIPE_TRANSFER_READ)
      NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_transfers_rd, 1);

   pipe_resource_reference(&transfer->resource, NULL);

   FREE(tx);
}
Example #10
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

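	/* If the buffer object is resident (not in system memory), take a
	 * reference on its sync object so the unmap and vma teardown can be
	 * deferred to fence work. */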
	if (mapped) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	nouveau_fence_unref(&fence);
}
Example #11
/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
bool
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

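   /* USER -> GART: allocate GART storage, map it, and copy the user data in
    * directly. */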
   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return false;
      ret = nouveau_bo_map(buf->bo, 0, nv->client);
      if (ret)
         return false;
      memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size);
      align_free(buf->data);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
            return false;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

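      /* Swap in storage in the new domain, copy with the GPU, and release the
       * old bo and suballocation once the current fence has passed. */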
      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_fence_work(screen->fence.current, nouveau_fence_unref_bo, bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
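      /* USER -> VRAM: copy the user-memory data into a GART staging area and
       * let the GPU write it into the freshly allocated VRAM storage. */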
      struct nouveau_transfer tx;
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return false;
      tx.base.resource = &buf->base;
      tx.base.box.x = 0;
      tx.base.box.width = buf->base.width0;
      tx.bo = NULL;
      tx.map = NULL;
      if (!nouveau_transfer_staging(nv, &tx, false))
         return false;
      nouveau_transfer_write(nv, &tx, 0, tx.base.box.width);
      nouveau_buffer_transfer_del(nv, &tx);
   } else
      return false;

   assert(buf->domain == new_domain);
   return true;
}