Example #1
static void
nv50_screen_destroy(struct pipe_screen *pscreen)
{
	struct nv50_screen *screen = nv50_screen(pscreen);
	unsigned i;

	for (i = 0; i < 3; i++) {
		if (screen->constbuf_parm[i])
			nouveau_bo_ref(NULL, &screen->constbuf_parm[i]);
	}

	if (screen->constbuf_misc[0])
		nouveau_bo_ref(NULL, &screen->constbuf_misc[0]);
	if (screen->tic)
		nouveau_bo_ref(NULL, &screen->tic);
	if (screen->tsc)
		nouveau_bo_ref(NULL, &screen->tsc);
	if (screen->static_init)
		so_ref(NULL, &screen->static_init);

	nouveau_notifier_free(&screen->sync);
	nouveau_grobj_free(&screen->tesla);
	nouveau_grobj_free(&screen->eng2d);
	nouveau_grobj_free(&screen->m2mf);
	nouveau_resource_destroy(&screen->immd_heap[0]);
	nouveau_resource_destroy(&screen->parm_heap[0]);
	nouveau_resource_destroy(&screen->parm_heap[1]);
	nouveau_screen_fini(&screen->base);
	FREE(screen);
}
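A note on the pattern all of these examples share: nouveau_bo_ref(bo, &ptr) stores a new reference in ptr and releases whatever reference ptr previously held, so nouveau_bo_ref(NULL, &ptr) is the idiomatic way to drop a buffer object, as the teardown above does repeatedly. A minimal sketch of the idiom, assuming libdrm_nouveau's nouveau.h (the helper names here are ours, not part of the API):

#include <stddef.h>
#include <nouveau.h>

static void
hold_bo(struct nouveau_bo **slot, struct nouveau_bo *bo)
{
	/* Takes a reference on bo (which may be NULL) and releases the
	 * reference previously held in *slot, so reassignment never leaks. */
	nouveau_bo_ref(bo, slot);
}

static void
drop_bo(struct nouveau_bo **slot)
{
	/* Passing NULL drops our reference and clears *slot, making a
	 * second drop_bo() call a harmless no-op. */
	nouveau_bo_ref(NULL, slot);
}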
Example #2
static void test_i915_nv_reimport_twice_check_flink_name(void)
{
    drm_intel_bo *test_intel_bo;
    int prime_fd;
    struct nouveau_bo *nvbo = NULL, *nvbo2 = NULL;
    uint32_t flink_name1, flink_name2;

    test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

    igt_assert(drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd) == 0);

    igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);

    /* create a new dma-buf */
    close(prime_fd);
    igt_assert(drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd) == 0);

    igt_assert(nouveau_bo_prime_handle_ref(ndev2, prime_fd, &nvbo2) == 0);
    close(prime_fd);

    igt_assert(nouveau_bo_name_get(nvbo, &flink_name1) == 0);
    igt_assert(nouveau_bo_name_get(nvbo2, &flink_name2) == 0);

    igt_assert_eq_u32(flink_name1, flink_name2);

    nouveau_bo_ref(NULL, &nvbo2);
    nouveau_bo_ref(NULL, &nvbo);
    drm_intel_bo_unreference(test_intel_bo);
}
Example #3
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
			     true, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	ret = nouveau_bo_map(pushbuf);
	if (ret) {
		nouveau_bo_unpin(pushbuf);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}
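Example #3 also shows the unwind discipline that goes with the idiom: each successful step (nouveau_bo_new, then nouveau_bo_pin, then nouveau_bo_map) is undone in reverse order when a later step fails. A sketch of the matching full teardown, assuming the in-kernel nouveau API used in the example; pushbuf_teardown is a hypothetical helper, not an upstream function:

static void
pushbuf_teardown(struct nouveau_bo **pushbuf)
{
	if (!*pushbuf)
		return;
	nouveau_bo_unmap(*pushbuf);	/* undo nouveau_bo_map() */
	nouveau_bo_unpin(*pushbuf);	/* undo nouveau_bo_pin() */
	nouveau_bo_ref(NULL, pushbuf);	/* drop the nouveau_bo_new() reference */
}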
Example #4
static void
vbo_bind_vertices(struct gl_context *ctx, const struct gl_client_array **arrays,
		  int base, unsigned min_index, unsigned max_index, int *pdelta)
{
	struct nouveau_render_state *render = to_render_state(ctx);
	struct nouveau_pushbuf *push = context_push(ctx);
	struct nouveau_bo *bo[NUM_VERTEX_ATTRS];
	unsigned offset[NUM_VERTEX_ATTRS];
	GLboolean dirty = GL_FALSE;
	int i, j, attr;
	RENDER_LOCALS(ctx);

	*pdelta = -1;

	FOR_EACH_BOUND_ATTR(render, i, attr) {
		const struct gl_client_array *array = arrays[attr];
		struct gl_buffer_object *obj = array->BufferObj;
		struct nouveau_array *a = &render->attrs[attr];
		unsigned delta = (base + min_index) * array->StrideB;

		bo[i] = NULL;

		if (nouveau_bufferobj_hw(obj)) {
			/* Array in a buffer obj. */
			nouveau_bo_ref(to_nouveau_bufferobj(obj)->bo, &bo[i]);
			offset[i] = delta + (intptr_t)array->Ptr;

		} else {
			int n = max_index - min_index + 1;
			char *sp = (char *)ADD_POINTERS(
				nouveau_bufferobj_sys(obj), array->Ptr) + delta;
			char *dp  = nouveau_get_scratch(ctx, n * a->stride,
							&bo[i], &offset[i]);

			/* Array in client memory, move it to a
			 * scratch buffer obj. */
			for (j = 0; j < n; j++)
				memcpy(dp + j * a->stride,
				       sp + j * array->StrideB,
				       a->stride);
		}

		dirty |= check_update_array(a, offset[i], bo[i], pdelta);
	}

	*pdelta -= min_index;

	if (dirty) {
		/* Buffers changed, update the attribute binding. */
		FOR_EACH_BOUND_ATTR(render, i, attr) {
			struct nouveau_array *a = &render->attrs[attr];

			nouveau_bo_ref(NULL, &a->bo);
			a->offset = offset[i];
			a->bo = bo[i];
		}

		TAG(render_release_vertices)(ctx);
		TAG(render_bind_vertices)(ctx);
	} else {
		/* Just cleanup. */
		FOR_EACH_BOUND_ATTR(render, i, attr)
			nouveau_bo_ref(NULL, &bo[i]);
	}
}
Example #5
int
nv84_fence_create(struct nouveau_drm *drm)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
	struct nv84_fence_priv *priv;
	int ret;

	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv84_fence_destroy;
	priv->base.suspend = nv84_fence_suspend;
	priv->base.resume = nv84_fence_resume;
	priv->base.context_new = nv84_fence_context_new;
	priv->base.context_del = nv84_fence_context_del;

#ifdef __NetBSD__
	spin_lock_init(&priv->base.waitlock);
	DRM_INIT_WAITQUEUE(&priv->base.waitqueue, "nvfenceq");
#else
	init_waitqueue_head(&priv->base.waiting);
#endif
	priv->base.uevent = true;

	ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_unpin(priv->bo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo);
	}

	if (ret == 0)
		ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
				     TTM_PL_FLAG_TT, 0, 0, NULL,
				     &priv->bo_gart);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo_gart);
			if (ret)
				nouveau_bo_unpin(priv->bo_gart);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo_gart);
	}

	if (ret)
		nv84_fence_destroy(drm);
	return ret;
}
Example #6
static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv = drm->fence;
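	/* nouveau_bo_unmap() is NULL-safe in the kernel driver, so only the
	 * unpin calls below need explicit checks */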
	nouveau_bo_unmap(priv->bo_gart);
	if (priv->bo_gart)
		nouveau_bo_unpin(priv->bo_gart);
	nouveau_bo_ref(NULL, &priv->bo_gart);
	nouveau_bo_unmap(priv->bo);
	if (priv->bo)
		nouveau_bo_unpin(priv->bo);
	nouveau_bo_ref(NULL, &priv->bo);
	drm->fence = NULL;
	kfree(priv);
}
Example #7
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
		int size, int align, uint32_t flags, uint32_t tile_mode,
		uint32_t tile_flags, bool no_vm, bool mappable,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
			     tile_flags, no_vm, mappable, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}
Example #8
static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}
Example #9
/* Allocate an extra bo if we can't fit everything we need simultaneously.
 * (Could happen for very large user arrays.)
 */
static INLINE boolean
nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
{
   int ret;
   const unsigned n = nv->scratch.nr_runout++;

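   /* gallium's REALLOC(ptr, old_size, new_size) needs the old size too,
    * hence the (n + 0) / (n + 1) pair below */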
   nv->scratch.runout = REALLOC(nv->scratch.runout,
                                (n + 0) * sizeof(*nv->scratch.runout),
                                (n + 1) * sizeof(*nv->scratch.runout));
   nv->scratch.runout[n] = NULL;

   ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout[n], size);
   if (!ret) {
      ret = nouveau_bo_map(nv->scratch.runout[n], 0, NULL);
      if (ret)
         nouveau_bo_ref(NULL, &nv->scratch.runout[--nv->scratch.nr_runout]);
   }
   if (!ret) {
      nv->scratch.current = nv->scratch.runout[n];
      nv->scratch.offset = 0;
      nv->scratch.end = size;
      nv->scratch.map = nv->scratch.current->map;
   }
   return !ret;
}
Example #10
int nv50_tls_realloc(struct nv50_screen *screen, unsigned tls_space)
{
   struct nouveau_pushbuf *push = screen->base.pushbuf;
   int ret;
   uint64_t tls_size;

   if (tls_space < screen->cur_tls_space)
      return 0;
   if (tls_space > screen->max_tls_space) {
      /* fixable by limiting number of warps (LOCAL_WARPS_LOG_ALLOC /
       * LOCAL_WARPS_NO_CLAMP) */
      NOUVEAU_ERR("Unsupported number of temporaries (%u > %u). Fixable if someone cares.\n",
            (unsigned)(tls_space / ONE_TEMP_SIZE),
            (unsigned)(screen->max_tls_space / ONE_TEMP_SIZE));
      return -ENOMEM;
   }

   nouveau_bo_ref(NULL, &screen->tls_bo);
   ret = nv50_tls_alloc(screen, tls_space, &tls_size);
   if (ret)
      return ret;

   BEGIN_NV04(push, NV50_3D(LOCAL_ADDRESS_HIGH), 3);
   PUSH_DATAh(push, screen->tls_bo->offset);
   PUSH_DATA (push, screen->tls_bo->offset);
   PUSH_DATA (push, util_logbase2(screen->cur_tls_space / 8));

   return 1;
}
Example #11
void
nouveau_channel_free(struct nouveau_channel **chan)
{
	struct nouveau_channel_priv *nvchan;
	struct nouveau_device_priv *nvdev;
	struct drm_nouveau_channel_free cf;

	if (!chan || !*chan)
		return;
	nvchan = nouveau_channel(*chan);
	*chan = NULL;
	nvdev = nouveau_device(nvchan->base.device);

	FIRE_RING(&nvchan->base);

	nouveau_bo_unmap(nvchan->notifier_bo);
	nouveau_bo_ref(NULL, &nvchan->notifier_bo);

	nouveau_grobj_free(&nvchan->base.vram);
	nouveau_grobj_free(&nvchan->base.gart);
	nouveau_grobj_free(&nvchan->base.nullobj);

	cf.channel = nvchan->drm.channel;
	drmCommandWrite(nvdev->fd, DRM_NOUVEAU_CHANNEL_FREE, &cf, sizeof(cf));
	free(nvchan);
}
Example #12
static void test_nv_i915_reimport_twice_check_flink_name(void)
{
    drm_intel_bo *intel_bo = NULL, *intel_bo2 = NULL;
    int prime_fd;
    struct nouveau_bo *nvbo = NULL;
    uint32_t flink_name1, flink_name2;

    igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                              0, BO_SIZE, NULL, &nvbo) == 0);

    igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

    intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
    igt_assert(intel_bo);
    close(prime_fd);
    igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

    intel_bo2 = drm_intel_bo_gem_create_from_prime(bufmgr2, prime_fd, BO_SIZE);
    igt_assert(intel_bo2);
    close(prime_fd);

    igt_assert(drm_intel_bo_flink(intel_bo, &flink_name1) == 0);
    igt_assert(drm_intel_bo_flink(intel_bo2, &flink_name2) == 0);

    igt_assert_eq_u32(flink_name1, flink_name2);

    nouveau_bo_ref(NULL, &nvbo);
    drm_intel_bo_unreference(intel_bo);
    drm_intel_bo_unreference(intel_bo2);
}
Example #13
void
nv50_miptree_transfer_del(struct pipe_context *pcontext,
			  struct pipe_transfer *ptx)
{
	struct nv50_transfer *tx = (struct nv50_transfer *)ptx;
	struct nv50_miptree *mt = nv50_miptree(ptx->resource);
	struct pipe_resource *pt = ptx->resource;

	unsigned nx = util_format_get_nblocksx(pt->format, tx->base.box.width);
	unsigned ny = util_format_get_nblocksy(pt->format, tx->base.box.height);

	if (ptx->usage & PIPE_TRANSFER_WRITE) {
		struct pipe_screen *pscreen = pcontext->screen;

		nv50_transfer_rect_m2mf(pscreen, tx->bo, 0,
					tx->base.stride, tx->bo->tile_mode,
					0, 0, 0,
					tx->nblocksx, tx->nblocksy, 1,
					mt->base.bo, tx->level_offset,
					tx->level_pitch, tx->level_tiling,
					tx->level_x, tx->level_y, tx->level_z,
					tx->nblocksx, tx->nblocksy,
					tx->level_depth,
					util_format_get_blocksize(pt->format), nx, ny,
					NOUVEAU_BO_GART, NOUVEAU_BO_VRAM |
					NOUVEAU_BO_GART);
	}

	nouveau_bo_ref(NULL, &tx->bo);
	pipe_resource_reference(&ptx->resource, NULL);
	FREE(ptx);
}
Example #14
/* Allocate an extra bo if we can't fit everything we need simultaneously.
 * (Could happen for very large user arrays.)
 */
static inline bool
nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
{
   int ret;
   unsigned n;

   if (nv->scratch.runout)
      n = nv->scratch.runout->nr;
   else
      n = 0;
   nv->scratch.runout = REALLOC(nv->scratch.runout, n == 0 ? 0 :
                                (sizeof(*nv->scratch.runout) + (n + 0) * sizeof(void *)),
                                 sizeof(*nv->scratch.runout) + (n + 1) * sizeof(void *));
   nv->scratch.runout->nr = n + 1;
   nv->scratch.runout->bo[n] = NULL;

   ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout->bo[n], size);
   if (!ret) {
      ret = nouveau_bo_map(nv->scratch.runout->bo[n], 0, NULL);
      if (ret)
         nouveau_bo_ref(NULL, &nv->scratch.runout->bo[--nv->scratch.runout->nr]);
   }
   if (!ret) {
      nv->scratch.current = nv->scratch.runout->bo[n];
      nv->scratch.offset = 0;
      nv->scratch.end = size;
      nv->scratch.map = nv->scratch.current->map;
   }
   return !ret;
}
Example #15
static struct pipe_texture *
nv50_miptree_blanket(struct pipe_screen *pscreen, const struct pipe_texture *pt,
		     const unsigned *stride, struct pipe_buffer *pb)
{
	struct nouveau_bo *bo = nouveau_bo(pb);
	struct nv50_miptree *mt;

	/* Only supports 2D, non-mipmapped textures for the moment */
	if (pt->target != PIPE_TEXTURE_2D || pt->last_level != 0 ||
	    pt->depth0 != 1)
		return NULL;

	mt = CALLOC_STRUCT(nv50_miptree);
	if (!mt)
		return NULL;

	mt->base.base = *pt;
	pipe_reference_init(&mt->base.base.reference, 1);
	mt->base.base.screen = pscreen;
	mt->image_nr = 1;
	mt->level[0].pitch = *stride;
	mt->level[0].image_offset = CALLOC(1, sizeof(unsigned));
	mt->level[0].tile_mode = bo->tile_mode;

	nouveau_bo_ref(bo, &mt->base.bo);
	return &mt->base.base;
}
Example #16
static void
nv50_compute_upload_input(struct nv50_context *nv50, const uint32_t *input)
{
   struct nv50_screen *screen = nv50->screen;
   struct nouveau_pushbuf *push = screen->base.pushbuf;
   unsigned size = align(nv50->compprog->parm_size, 0x4);

   BEGIN_NV04(push, NV50_COMPUTE(USER_PARAM_COUNT), 1);
   PUSH_DATA (push, (size / 4) << 8);

   if (size) {
      struct nouveau_mm_allocation *mm;
      struct nouveau_bo *bo = NULL;
      unsigned offset;

      mm = nouveau_mm_allocate(screen->base.mm_GART, size, &bo, &offset);
      assert(mm);

      nouveau_bo_map(bo, 0, screen->base.client);
      memcpy(bo->map + offset, input, size);

      nouveau_bufctx_refn(nv50->bufctx, 0, bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
      nouveau_pushbuf_bufctx(push, nv50->bufctx);
      nouveau_pushbuf_validate(push);

      BEGIN_NV04(push, NV50_COMPUTE(USER_PARAM(0)), size / 4);
      nouveau_pushbuf_data(push, bo, offset, size);

      nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work, mm);
      nouveau_bo_ref(NULL, &bo);
      nouveau_bufctx_reset(nv50->bufctx, 0);
   }
}
Example #17
void
nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
                            struct pipe_transfer *transfer)
{
   struct nvc0_context *nvc0 = nvc0_context(pctx);
   struct nvc0_transfer *tx = (struct nvc0_transfer *)transfer;
   struct nv50_miptree *mt = nv50_miptree(tx->base.resource);
   unsigned i;

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      for (i = 0; i < tx->nlayers; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &tx->rect[0], &tx->rect[1],
                              tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += tx->nblocksy * tx->base.stride;
      }
   }

   nouveau_bo_ref(NULL, &tx->rect[1].bo);
   pipe_resource_reference(&transfer->resource, NULL);

   FREE(tx);
}
Example #18
/* nouveau export reimport test */
static void test_nv_self_import(void)
{
    int prime_fd;
    struct nouveau_bo *nvbo, *nvbo2;

    igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                              0, BO_SIZE, NULL, &nvbo) == 0);
    igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

    igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo2) == 0);
    close(prime_fd);

    igt_assert(nvbo->handle == nvbo2->handle);
    nouveau_bo_ref(NULL, &nvbo);
    nouveau_bo_ref(NULL, &nvbo2);
}
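The prime tests above all follow the same fd lifecycle: export, import, then close the fd, because the importer takes its own reference on the buffer and the descriptor is only a transport handle. A condensed sketch, assuming libdrm_nouveau's prime helpers as used in these tests; share_bo is our name, not part of the API:

#include <unistd.h>
#include <nouveau.h>

static int
share_bo(struct nouveau_device *dev, struct nouveau_bo *bo,
         struct nouveau_bo **imported)
{
    int prime_fd, ret;

    ret = nouveau_bo_set_prime(bo, &prime_fd);    /* export as dma-buf */
    if (ret)
        return ret;

    ret = nouveau_bo_prime_handle_ref(dev, prime_fd, imported);
    close(prime_fd);    /* the importer now holds its own reference */
    return ret;
}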
Example #19
void
nouveau_channel_free(struct nouveau_channel **chan)
{
	struct nouveau_channel_priv *nvchan;
	struct nouveau_device_priv *nvdev;
	struct drm_nouveau_channel_free cf;
	unsigned i;

	if (!chan || !*chan)
		return;
	nvchan = nouveau_channel(*chan);
	(*chan)->flush_notify = NULL;
	*chan = NULL;
	nvdev = nouveau_device(nvchan->base.device);

	FIRE_RING(&nvchan->base);

	nouveau_pushbuf_fini(&nvchan->base);
	if (nvchan->notifier_bo) {
		nouveau_bo_unmap(nvchan->notifier_bo);
		nouveau_bo_ref(NULL, &nvchan->notifier_bo);
	}

	for (i = 0; i < nvchan->drm.nr_subchan; i++)
		free(nvchan->base.subc[i].gr);

	nouveau_grobj_free(&nvchan->base.vram);
	nouveau_grobj_free(&nvchan->base.gart);
	nouveau_grobj_free(&nvchan->base.nullobj);

	cf.channel = nvchan->drm.channel;
	drmCommandWrite(nvdev->fd, DRM_NOUVEAU_CHANNEL_FREE, &cf, sizeof(cf));
	free(nvchan);
}
Example #20
/* nouveau export reimport to other driver test */
static void test_nv_self_import_to_different_fd(void)
{
    int prime_fd;
    struct nouveau_bo *nvbo, *nvbo2;

    igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                              0, BO_SIZE, NULL, &nvbo) == 0);
    igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

    igt_assert(nouveau_bo_prime_handle_ref(ndev2, prime_fd, &nvbo2) == 0);
    close(prime_fd);

    /* not sure what to test for, just make sure we don't explode */
    nouveau_bo_ref(NULL, &nvbo);
    nouveau_bo_ref(NULL, &nvbo2);
}
Example #21
static void
setup_hierz_buffer(struct gl_context *ctx)
{
	struct nouveau_channel *chan = context_chan(ctx);
	struct nouveau_grobj *celsius = context_eng3d(ctx);
	struct nouveau_bo_context *bctx = context_bctx(ctx, HIERZ);
	struct gl_framebuffer *fb = ctx->DrawBuffer;
	struct nouveau_framebuffer *nfb = to_nouveau_framebuffer(fb);
	unsigned pitch = align(fb->Width, 128),
		height = align(fb->Height, 2),
		size = pitch * height;

	if (!nfb->hierz.bo || nfb->hierz.bo->size != size) {
		nouveau_bo_ref(NULL, &nfb->hierz.bo);
		nouveau_bo_new_tile(context_dev(ctx), NOUVEAU_BO_VRAM, 0, size,
				    0, NOUVEAU_BO_TILE_ZETA, &nfb->hierz.bo);
	}

	nouveau_bo_markl(bctx, celsius, NV17_3D_HIERZ_OFFSET,
			 nfb->hierz.bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);

	WAIT_RING(chan, 9);
	BEGIN_RING(chan, celsius, NV17_3D_HIERZ_WINDOW_X, 4);
	OUT_RINGf(chan, - 1792);
	OUT_RINGf(chan, - 2304 + fb->Height);
	OUT_RINGf(chan, fb->_DepthMaxF / 2);
	OUT_RINGf(chan, 0);

	BEGIN_RING(chan, celsius, NV17_3D_HIERZ_PITCH, 1);
	OUT_RING(chan, pitch);

	BEGIN_RING(chan, celsius, NV17_3D_HIERZ_ENABLE, 1);
	OUT_RING(chan, 1);
}
Example #22
static int
sfbhack_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo = NULL;
	uint32_t tile_flags = dev_priv->card_type == NV_50 ? 0x7000 : 0x0000;
	int ret, size;

	if (dev_priv->sfb_gem)
		return 0;

	size = nouveau_mem_fb_amount(dev);
	if (size > drm_get_resource_len(dev, 1))
		size = drm_get_resource_len(dev, 1);
	size >>= 1;

	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
			      0, tile_flags, false, true, &nvbo);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	dev_priv->sfb_gem = nvbo->gem;
	return 0;
}
Example #23
/* Maybe just migrate to GART right away if we actually need to do this. */
boolean
nouveau_buffer_download(struct nouveau_context *nv, struct nv04_resource *buf,
                        unsigned start, unsigned size)
{
   struct nouveau_mm_allocation *mm;
   struct nouveau_bo *bounce = NULL;
   uint32_t offset;

   assert(buf->domain == NOUVEAU_BO_VRAM);

   mm = nouveau_mm_allocate(nv->screen->mm_GART, size, &bounce, &offset);
   if (!bounce)
      return FALSE;

   nv->copy_data(nv, bounce, offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + start, NOUVEAU_BO_VRAM, size);

   if (nouveau_bo_map(bounce, NOUVEAU_BO_RD, nv->screen->client))
      return FALSE;
   memcpy(buf->data + start, (uint8_t *)bounce->map + offset, size);

   buf->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   nouveau_bo_ref(NULL, &bounce);
   if (mm)
      nouveau_mm_free(mm);
   return TRUE;
}
Example #24
static bool
nv50_hw_query_allocate(struct nv50_context *nv50, struct nv50_query *q,
                       int size)
{
   struct nv50_screen *screen = nv50->screen;
   struct nv50_hw_query *hq = nv50_hw_query(q);
   int ret;

   if (hq->bo) {
      nouveau_bo_ref(NULL, &hq->bo);
      if (hq->mm) {
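         /* the query buffer may still be written by in-flight commands:
          * free it immediately only when the results have landed, else
          * defer the release to the current fence */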
         if (hq->state == NV50_HW_QUERY_STATE_READY)
            nouveau_mm_free(hq->mm);
         else
            nouveau_fence_work(screen->base.fence.current,
                               nouveau_mm_free_work, hq->mm);
      }
   }
   if (size) {
      hq->mm = nouveau_mm_allocate(screen->base.mm_GART, size,
                                   &hq->bo, &hq->base_offset);
      if (!hq->bo)
         return false;
      hq->offset = hq->base_offset;

      ret = nouveau_bo_map(hq->bo, 0, screen->base.client);
      if (ret) {
         nv50_hw_query_allocate(nv50, q, 0);
         return false;
      }
      hq->data = (uint32_t *)((uint8_t *)hq->bo->map + hq->base_offset);
   }
   return true;
}
Example #25
static GLboolean
nouveau_bufferobj_data(struct gl_context *ctx, GLenum target, GLsizeiptrARB size,
                       const GLvoid *data, GLenum usage,
                       struct gl_buffer_object *obj)
{
    struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);
    int ret;

    obj->Size = size;
    obj->Usage = usage;

    /* Free previous storage */
    nouveau_bo_ref(NULL, &nbo->bo);
    FREE(nbo->sys);

    if (target == GL_ELEMENT_ARRAY_BUFFER_ARB ||
            (size < 512 && usage == GL_DYNAMIC_DRAW_ARB) ||
            context_chipset(ctx) < 0x10) {
        /* Heuristic: keep it in system ram */
        nbo->sys = MALLOC(size);

    } else {
        /* Get a hardware BO */
        ret = nouveau_bo_new(context_dev(ctx),
                             NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
                             size, &nbo->bo);
        assert(!ret);
    }

    if (data)
        memcpy(get_bufferobj_map(obj, NOUVEAU_BO_WR), data, size);

    return GL_TRUE;
}
Example #26
static GLboolean
nouveau_bufferobj_data(GLcontext *ctx, GLenum target, GLsizeiptrARB size,
		       const GLvoid *data, GLenum usage,
		       struct gl_buffer_object *obj)
{
	struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);
	int ret;

	obj->Size = size;
	obj->Usage = usage;

	nouveau_bo_ref(NULL, &nbo->bo);
	ret = nouveau_bo_new(context_dev(ctx),
			     NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
			     size, &nbo->bo);
	assert(!ret);

	if (data) {
		nouveau_bo_map(nbo->bo, NOUVEAU_BO_WR);
		memcpy(nbo->bo->map, data, size);
		nouveau_bo_unmap(nbo->bo);
	}

	return GL_TRUE;
}
Example #27
static boolean
nv50_query_allocate(struct nv50_context *nv50, struct nv50_query *q, int size)
{
   struct nv50_screen *screen = nv50->screen;
   int ret;

   if (q->bo) {
      nouveau_bo_ref(NULL, &q->bo);
      if (q->mm) {
         if (q->ready)
            nouveau_mm_free(q->mm);
         else
            nouveau_fence_work(screen->base.fence.current, nouveau_mm_free_work,
                               q->mm);
      }
   }
   if (size) {
      q->mm = nouveau_mm_allocate(screen->base.mm_GART, size, &q->bo, &q->base);
      if (!q->bo)
         return FALSE;
      q->offset = q->base;

      ret = nouveau_bo_map(q->bo, 0, screen->base.client);
      if (ret) {
         nv50_query_allocate(nv50, q, 0);
         return FALSE;
      }
      q->data = (uint32_t *)((uint8_t *)q->bo->map + q->base);
   }
   return TRUE;
}
Example #28
static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possibly on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}
Example #29
/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
boolean
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return FALSE;
      ret = nouveau_bo_map_range(buf->bo, buf->offset, size, NOUVEAU_BO_WR |
                                 NOUVEAU_BO_NOSYNC);
      if (ret)
         return FALSE;
      memcpy(buf->bo->map, buf->data, size);
      nouveau_bo_unmap(buf->bo);
      FREE(buf->data);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(buf, buf->bo, buf->offset, size))
            return FALSE;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

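      /* detach the old bo/mm so nouveau_buffer_allocate() can install new
       * storage, copy the contents across, then drop the old reference */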
      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_bo_ref(NULL, &bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return FALSE;
      if (!nouveau_buffer_upload(nv, buf, 0, buf->base.width0))
         return FALSE;
   } else
      return FALSE;

   assert(buf->domain == new_domain);
   return TRUE;
}
Example #30
static void
nv30_miptree_destroy(struct pipe_screen *pscreen, struct pipe_resource *pt)
{
   struct nv30_miptree *mt = nv30_miptree(pt);

   nouveau_bo_ref(NULL, &mt->base.bo);
   FREE(mt);
}