Example #1
int
nv84_fence_create(struct nouveau_drm *drm)
{
	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
	struct nv84_fence_priv *priv;
	int ret;

	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv84_fence_destroy;
	priv->base.suspend = nv84_fence_suspend;
	priv->base.resume = nv84_fence_resume;
	priv->base.context_new = nv84_fence_context_new;
	priv->base.context_del = nv84_fence_context_del;

#ifdef __NetBSD__
	spin_lock_init(&priv->base.waitlock);
	DRM_INIT_WAITQUEUE(&priv->base.waitqueue, "nvfenceq");
#else
	init_waitqueue_head(&priv->base.waiting);
#endif
	priv->base.uevent = true;

	ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_unpin(priv->bo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo);
	}

	if (ret == 0)
		ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
				     TTM_PL_FLAG_TT, 0, 0, NULL,
				     &priv->bo_gart);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo_gart);
			if (ret)
				nouveau_bo_unpin(priv->bo_gart);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo_gart);
	}

	if (ret)
		nv84_fence_destroy(drm);
	return ret;
}
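
The ladder above (allocate, then pin, then map, unwinding in reverse at each failure) recurs throughout these examples. A minimal sketch of the same pattern as a standalone helper, assuming the call signatures used in Example #1; the helper itself is hypothetical:

static int
fence_bo_setup(struct nouveau_drm *drm, u32 size, u32 place,
	       struct nouveau_bo **pbo)
{
	/* allocate -> pin -> map; each failure undoes the earlier steps */
	int ret = nouveau_bo_new(drm->dev, size, 0, place, 0, 0, NULL, pbo);
	if (ret)
		return ret;
	ret = nouveau_bo_pin(*pbo, place);
	if (ret == 0) {
		ret = nouveau_bo_map(*pbo);
		if (ret)
			nouveau_bo_unpin(*pbo);	/* undo the pin */
	}
	if (ret)
		nouveau_bo_ref(NULL, pbo);	/* drop the only reference */
	return ret;
}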
Example #2
static GLboolean
nouveau_bufferobj_data(struct gl_context *ctx, GLenum target, GLsizeiptrARB size,
                       const GLvoid *data, GLenum usage,
                       struct gl_buffer_object *obj)
{
    struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);
    int ret;

    obj->Size = size;
    obj->Usage = usage;

    /* Free previous storage */
    nouveau_bo_ref(NULL, &nbo->bo);
    FREE(nbo->sys);
    nbo->sys = NULL;

    if (target == GL_ELEMENT_ARRAY_BUFFER_ARB ||
            (size < 512 && usage == GL_DYNAMIC_DRAW_ARB) ||
            context_chipset(ctx) < 0x10) {
        /* Heuristic: keep it in system ram */
        nbo->sys = MALLOC(size);

    } else {
        /* Get a hardware BO */
        ret = nouveau_bo_new(context_dev(ctx),
                             NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
                             size, &nbo->bo);
        assert(!ret);
    }

    if (data)
        memcpy(get_bufferobj_map(obj, NOUVEAU_BO_WR), data, size);

    return GL_TRUE;
}
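
Example #2's branch is a placement heuristic: index buffers, small dynamic buffers, and pre-NV10 chips keep their storage in system RAM; everything else gets a GART-mapped hardware BO. The same conditions pulled out as a predicate, purely as a sketch (the helper name is invented, the tests are copied verbatim):

static GLboolean
keep_in_sysram(struct gl_context *ctx, GLenum target,
               GLsizeiptrARB size, GLenum usage)
{
    /* the three conditions from the example above, unchanged */
    return target == GL_ELEMENT_ARRAY_BUFFER_ARB ||
           (size < 512 && usage == GL_DYNAMIC_DRAW_ARB) ||
           context_chipset(ctx) < 0x10;
}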
Example #3
static GLboolean
nouveau_bufferobj_data(GLcontext *ctx, GLenum target, GLsizeiptrARB size,
		       const GLvoid *data, GLenum usage,
		       struct gl_buffer_object *obj)
{
	struct nouveau_bufferobj *nbo = to_nouveau_bufferobj(obj);
	int ret;

	obj->Size = size;
	obj->Usage = usage;

	nouveau_bo_ref(NULL, &nbo->bo);
	ret = nouveau_bo_new(context_dev(ctx),
			     NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
			     size, &nbo->bo);
	assert(!ret);

	if (data) {
		nouveau_bo_map(nbo->bo, NOUVEAU_BO_WR);
		memcpy(nbo->bo->map, data, size);
		nouveau_bo_unmap(nbo->bo);
	}

	return GL_TRUE;
}
Example #4
static INLINE int
nouveau_scratch_bo_alloc(struct nouveau_context *nv, struct nouveau_bo **pbo,
                         unsigned size)
{
   return nouveau_bo_new(nv->screen->device, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                         4096, size, NULL, pbo);
}
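
A plausible call site for the scratch allocator above, sketched under two assumptions not shown in this snippet: the libdrm-style nouveau_bo_map(bo, flags, client) seen in Example #27, and a client handle reachable as nv->client:

struct nouveau_bo *bo = NULL;

if (nouveau_scratch_bo_alloc(nv, &bo, 0x10000) == 0) {
   /* GART + MAP placement means the CPU can write the buffer directly */
   if (nouveau_bo_map(bo, NOUVEAU_BO_WR, nv->client) == 0)
      memset(bo->map, 0, 0x10000);
   nouveau_bo_ref(NULL, &bo);   /* release our reference when done */
}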
Example #5
static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}
Example #6
static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}
Example #7
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
			     true, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}
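
Callers of the helper above receive either a pinned push buffer or NULL, with the error already logged. A sketch of the call site; the pushbuf_bo field is assumed for illustration:

chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
if (!chan->pushbuf_bo)
	return -ENOMEM;	/* allocation or pinning failed, already logged */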
Example #8
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
		int size, int align, uint32_t flags, uint32_t tile_mode,
		uint32_t tile_flags, bool no_vm, bool mappable,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
			     tile_flags, no_vm, mappable, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}
Example #9
static void test_nv_i915_reimport_twice_check_flink_name(void)
{
    drm_intel_bo *intel_bo = NULL, *intel_bo2 = NULL;
    int prime_fd;
    struct nouveau_bo *nvbo = NULL;
    uint32_t flink_name1, flink_name2;

    igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                              0, BO_SIZE, NULL, &nvbo) == 0);

    igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

    intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
    igt_assert(intel_bo);
    close(prime_fd);
    igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

    intel_bo2 = drm_intel_bo_gem_create_from_prime(bufmgr2, prime_fd, BO_SIZE);
    igt_assert(intel_bo2);
    close(prime_fd);

    igt_assert(drm_intel_bo_flink(intel_bo, &flink_name1) == 0);
    igt_assert(drm_intel_bo_flink(intel_bo2, &flink_name2) == 0);

    igt_assert_eq_u32(flink_name1, flink_name2);

    nouveau_bo_ref(NULL, &nvbo);
    drm_intel_bo_unreference(intel_bo);
    drm_intel_bo_unreference(intel_bo2);
}
Example #10
static struct pipe_buffer *
nouveau_pipe_bo_create(struct pipe_winsys *ws, unsigned alignment,
		       unsigned usage, unsigned size)
{
	struct nouveau_pipe_winsys *nvpws = nouveau_pipe_winsys(ws);
	struct nouveau_device *dev = nvpws->channel->device;
	struct nouveau_pipe_buffer *nvbuf;
	uint32_t flags;

	nvbuf = CALLOC_STRUCT(nouveau_pipe_buffer);
	if (!nvbuf)
		return NULL;
	pipe_reference_init(&nvbuf->base.reference, 1);
	nvbuf->base.alignment = alignment;
	nvbuf->base.usage = usage;
	nvbuf->base.size = size;

	flags = nouveau_flags_from_usage(ws, usage);
	if (nouveau_bo_new(dev, flags, alignment, size, &nvbuf->bo)) {
		FREE(nvbuf);
		return NULL;
	}

	return &nvbuf->base;
}
Example #11
GLboolean
nouveau_context_create(gl_api api,
		       const struct gl_config *visual, __DRIcontext *dri_ctx,
		       unsigned major_version,
		       unsigned minor_version,
		       uint32_t flags,
		       bool notify_reset,
		       unsigned *error,
		       void *share_ctx)
{
	__DRIscreen *dri_screen = dri_ctx->driScreenPriv;
	struct nouveau_screen *screen = dri_screen->driverPrivate;
	struct nouveau_context *nctx;
	struct gl_context *ctx;

	if (flags & ~__DRI_CTX_FLAG_DEBUG) {
		*error = __DRI_CTX_ERROR_UNKNOWN_FLAG;
		return GL_FALSE;
	}

	if (notify_reset) {
		*error = __DRI_CTX_ERROR_UNKNOWN_ATTRIBUTE;
		return GL_FALSE;
	}

	ctx = screen->driver->context_create(screen, visual, share_ctx);
	if (!ctx) {
		*error = __DRI_CTX_ERROR_NO_MEMORY;
		return GL_FALSE;
	}

	driContextSetFlags(ctx, flags);

	nctx = to_nouveau_context(ctx);
	nctx->dri_context = dri_ctx;
	dri_ctx->driverPrivate = ctx;

	_mesa_compute_version(ctx);
	if (ctx->Version < major_version * 10 + minor_version) {
	   nouveau_context_destroy(dri_ctx);
	   *error = __DRI_CTX_ERROR_BAD_VERSION;
	   return GL_FALSE;
	}

	/* Exec table initialization requires the version to be computed */
	_mesa_initialize_dispatch_tables(ctx);
	_mesa_initialize_vbo_vtxfmt(ctx);

	if (nouveau_bo_new(context_dev(ctx), NOUVEAU_BO_VRAM, 0, 4096,
			   NULL, &nctx->fence)) {
		nouveau_context_destroy(dri_ctx);
		*error = __DRI_CTX_ERROR_NO_MEMORY;
		return GL_FALSE;
	}

	*error = __DRI_CTX_ERROR_SUCCESS;
	return GL_TRUE;
}
Example #12
static struct pipe_transfer *
nv30_miptree_transfer_new(struct pipe_context *pipe, struct pipe_resource *pt,
                          unsigned level, unsigned usage,
                          const struct pipe_box *box)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct nouveau_device *dev = nv30->screen->base.device;
   struct nv30_transfer *tx;
   int ret;

   tx = CALLOC_STRUCT(nv30_transfer);
   if (!tx)
      return NULL;
   pipe_resource_reference(&tx->base.resource, pt);
   tx->base.level = level;
   tx->base.usage = usage;
   tx->base.box = *box;
   tx->base.stride = util_format_get_nblocksx(pt->format, box->width) *
                     util_format_get_blocksize(pt->format);
   tx->base.layer_stride = util_format_get_nblocksy(pt->format, box->height) *
                           tx->base.stride;

   tx->nblocksx = util_format_get_nblocksx(pt->format, box->width);
   tx->nblocksy = util_format_get_nblocksy(pt->format, box->height);

   define_rect(pt, level, box->z, box->x, box->y,
                   tx->nblocksx, tx->nblocksy, &tx->img);

   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
                        tx->base.layer_stride, NULL, &tx->tmp.bo);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      FREE(tx);
      return NULL;
   }

   tx->tmp.domain = NOUVEAU_BO_GART;
   tx->tmp.offset = 0;
   tx->tmp.pitch  = tx->base.stride;
   tx->tmp.cpp    = tx->img.cpp;
   tx->tmp.w      = tx->nblocksx;
   tx->tmp.h      = tx->nblocksy;
   tx->tmp.d      = 1;
   tx->tmp.x0     = 0;
   tx->tmp.y0     = 0;
   tx->tmp.x1     = tx->tmp.w;
   tx->tmp.y1     = tx->tmp.h;
   tx->tmp.z      = 0;

   if (usage & PIPE_TRANSFER_READ)
      nv30_transfer_rect(nv30, NEAREST, &tx->img, &tx->tmp);

   return &tx->base;
}
Example #13
struct pipe_screen *
nv50_screen_create(struct nouveau_device *dev)
{
   struct nv50_screen *screen;
   struct pipe_screen *pscreen;
   struct nouveau_object *chan;
   uint64_t value;
   uint32_t tesla_class;
   unsigned stack_size, max_warps, tls_space;
   int ret;

   screen = CALLOC_STRUCT(nv50_screen);
   if (!screen)
      return NULL;
   pscreen = &screen->base.base;

   screen->base.sysmem_bindings = PIPE_BIND_CONSTANT_BUFFER;

   ret = nouveau_screen_init(&screen->base, dev);
   if (ret)
      FAIL_SCREEN_INIT("nouveau_screen_init failed: %d\n", ret);

   screen->base.pushbuf->user_priv = screen;
   screen->base.pushbuf->rsvd_kick = 5;

   chan = screen->base.channel;

   pscreen->destroy = nv50_screen_destroy;
   pscreen->context_create = nv50_create;
   pscreen->is_format_supported = nv50_screen_is_format_supported;
   pscreen->get_param = nv50_screen_get_param;
   pscreen->get_shader_param = nv50_screen_get_shader_param;
   pscreen->get_paramf = nv50_screen_get_paramf;

   nv50_screen_init_resource_functions(pscreen);

   nouveau_screen_init_vdec(&screen->base);

   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0, 4096,
                        NULL, &screen->fence.bo);
   if (ret)
      goto fail;
   nouveau_bo_map(screen->fence.bo, 0, NULL);
   screen->fence.map = screen->fence.bo->map;
   screen->base.fence.emit = nv50_screen_fence_emit;
   screen->base.fence.update = nv50_screen_fence_update;

   ret = nouveau_object_new(chan, 0xbeef0301, NOUVEAU_NOTIFIER_CLASS,
                            &(struct nv04_notify){ .length = 32 },
                            sizeof(struct nv04_notify), &screen->sync);
Example #14
/* nouveau export reimport to other driver test */
static void test_nv_self_import_to_different_fd(void)
{
    int prime_fd;
    struct nouveau_bo *nvbo, *nvbo2;

    igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                              0, BO_SIZE, NULL, &nvbo) == 0);
    igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

    igt_assert(nouveau_bo_prime_handle_ref(ndev2, prime_fd, &nvbo2) == 0);
    close(prime_fd);

    /* not sure what to test for, just make sure we don't explode */
    nouveau_bo_ref(NULL, &nvbo);
    nouveau_bo_ref(NULL, &nvbo2);
}
Example #15
/* nouveau export reimport test */
static void test_nv_self_import(void)
{
    int prime_fd;
    struct nouveau_bo *nvbo, *nvbo2;

    igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                              0, BO_SIZE, NULL, &nvbo) == 0);
    igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

    igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo2) == 0);
    close(prime_fd);

    igt_assert(nvbo->handle == nvbo2->handle);
    nouveau_bo_ref(NULL, &nvbo);
    nouveau_bo_ref(NULL, &nvbo2);
}
Example #16
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (domain & NOUVEAU_GEM_DOMAIN_MAPPABLE)
		flags |= TTM_PL_FLAG_UNCACHED;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}
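
Examples #16, #20, and #22 all begin with the same translation from GEM domain bits to TTM placement flags. Isolated here as a sketch (the helper is hypothetical; the flag logic is copied from Example #16):

static u32
domain_to_ttm_flags(uint32_t domain)
{
	u32 flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;	/* fall back to system RAM */
	if (domain & NOUVEAU_GEM_DOMAIN_MAPPABLE)
		flags |= TTM_PL_FLAG_UNCACHED;
	return flags;
}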
Example #17
int
nv84_fence_create(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv;
	u32 domain;
	int ret;

	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv84_fence_destroy;
	priv->base.suspend = nv84_fence_suspend;
	priv->base.resume = nv84_fence_resume;
	priv->base.context_new = nv84_fence_context_new;
	priv->base.context_del = nv84_fence_context_del;

	priv->base.uevent = true;

	mutex_init(&priv->mutex);

	/* Use VRAM if there is any; otherwise fall back to system memory */
	domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
			 /*
			  * fences created in sysmem must be non-cached or we
			  * will lose CPU/GPU coherency!
			  */
			 TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
	ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0,
			     domain, 0, 0, NULL, NULL, &priv->bo);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo, domain, false);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_unpin(priv->bo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo);
	}

	if (ret)
		nv84_fence_destroy(drm);
	return ret;
}
Example #18
int
nv10_fence_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv10_fence_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.engine.destroy = nv10_fence_destroy;
	priv->base.engine.init = nv10_fence_init;
	priv->base.engine.fini = nv10_fence_fini;
	priv->base.engine.context_new = nv10_fence_context_new;
	priv->base.engine.context_del = nv10_fence_context_del;
	priv->base.emit = nv10_fence_emit;
	priv->base.read = nv10_fence_read;
	priv->base.sync = nv10_fence_sync;
	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
	spin_lock_init(&priv->lock);

	if (dev_priv->chipset >= 0x17) {
		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
				     0, 0x0000, NULL, &priv->bo);
		if (!ret) {
			ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
			if (!ret)
				ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_ref(NULL, &priv->bo);
		}

		if (ret == 0) {
			nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
			priv->base.sync = nv17_fence_sync;
		}
	}

	if (ret)
		nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
	return ret;
}
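
Example #18 only allocates the VRAM buffer and swaps in nv17_fence_sync on chipset 0x17 and later; earlier chips stay on the plain nv10_fence_sync. The gate as a predicate, hypothetical but with the threshold taken from the code above:

static bool
nv10_has_hw_sync(struct drm_nouveau_private *dev_priv)
{
	return dev_priv->chipset >= 0x17;
}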
Example #19
static struct pipe_query *
nv50_query_create(struct pipe_context *pipe, unsigned type)
{
	struct nouveau_device *dev = nouveau_screen(pipe->screen)->device;
	struct nv50_query *q = CALLOC_STRUCT(nv50_query);
	int ret;

	if (!q)
		return NULL;
	assert(type == PIPE_QUERY_OCCLUSION_COUNTER);
	q->type = type;

	ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 256,
			     16, &q->bo);
	if (ret) {
		FREE(q);
		return NULL;
	}

	return (struct pipe_query *)q;
}
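
The matching teardown for Example #19 would drop the buffer reference before freeing the wrapper. A sketch consistent with the allocation above (the destroy hook itself is assumed, not part of this snippet):

static void
nv50_query_destroy(struct pipe_context *pipe, struct pipe_query *pq)
{
	struct nv50_query *q = (struct nv50_query *)pq;

	nouveau_bo_ref(NULL, &q->bo);	/* unreference the GPU buffer */
	FREE(q);
}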
Example #20
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
                struct nouveau_bo **pnvbo)
{
    struct nouveau_drm *drm = nouveau_drm(dev);
    struct nouveau_bo *nvbo;
    u32 flags = 0;
    int ret;

    if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
        flags |= TTM_PL_FLAG_VRAM;
    if (domain & NOUVEAU_GEM_DOMAIN_GART)
        flags |= TTM_PL_FLAG_TT;
    if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
        flags |= TTM_PL_FLAG_SYSTEM;

    ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
                         tile_flags, NULL, pnvbo);
    if (ret)
        return ret;
    nvbo = *pnvbo;

    /* we restrict allowed domains on nv50+ to only the types
     * that were requested at creation time.  not possible on
     * earlier chips without busting the ABI.
     */
    nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                          NOUVEAU_GEM_DOMAIN_GART;
    if (nv_device(drm->device)->card_type >= NV_50)
        nvbo->valid_domains &= domain;

    nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
    if (!nvbo->gem) {
        nouveau_bo_ref(NULL, pnvbo);
        return -ENOMEM;
    }

    nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
    nvbo->gem->driver_private = nvbo;
    return 0;
}
Example #21
static void
setup_hierz_buffer(struct gl_context *ctx)
{
	struct nouveau_pushbuf *push = context_push(ctx);
	struct gl_framebuffer *fb = ctx->DrawBuffer;
	struct nouveau_framebuffer *nfb = to_nouveau_framebuffer(fb);
	unsigned pitch = align(fb->Width, 128),
		height = align(fb->Height, 2),
		size = pitch * height;

	if (!nfb->hierz.bo || nfb->hierz.bo->size != size) {
		nouveau_bo_ref(NULL, &nfb->hierz.bo);
		nouveau_bo_new(context_dev(ctx), NOUVEAU_BO_VRAM, 0, size,
			       NULL, &nfb->hierz.bo);
	}

	BEGIN_NV04(push, NV25_3D(HIERZ_PITCH), 1);
	PUSH_DATA (push, pitch);
	BEGIN_NV04(push, NV25_3D(HIERZ_OFFSET), 1);
	PUSH_MTHDl(push, NV25_3D(HIERZ_OFFSET), BUFCTX_FB,
			 nfb->hierz.bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_RDWR);
}
Example #22
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (dev_priv->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}
Example #23
static int nv50_tls_alloc(struct nv50_screen *screen, unsigned tls_space,
      uint64_t *tls_size)
{
   struct nouveau_device *dev = screen->base.device;
   int ret;

   screen->cur_tls_space = util_next_power_of_two(tls_space / ONE_TEMP_SIZE) *
         ONE_TEMP_SIZE;
   if (nouveau_mesa_debug)
      debug_printf("allocating space for %u temps\n",
            util_next_power_of_two(tls_space / ONE_TEMP_SIZE));
   *tls_size = screen->cur_tls_space * util_next_power_of_two(screen->TPs) *
         screen->MPsInTP * LOCAL_WARPS_ALLOC * THREADS_IN_WARP;

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 1 << 16,
                        *tls_size, NULL, &screen->tls_bo);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate local bo: %d\n", ret);
      return ret;
   }

   return 0;
}
Example #24
int
nvc0_screen_compute_setup(struct nvc0_screen *screen,
                          struct nouveau_pushbuf *push)
{
   struct nouveau_object *chan = screen->base.channel;
   struct nouveau_device *dev = screen->base.device;
   uint32_t obj_class;
   int ret;
   int i;

   switch (dev->chipset & ~0xf) {
   case 0xc0:
      if (dev->chipset == 0xc8)
         obj_class = NVC8_COMPUTE_CLASS;
      else
         obj_class = NVC0_COMPUTE_CLASS;
      break;
   case 0xd0:
      obj_class = NVC0_COMPUTE_CLASS;
      break;
   default:
      NOUVEAU_ERR("unsupported chipset: NV%02x\n", dev->chipset);
      return -1;
   }

   ret = nouveau_object_new(chan, 0xbeef90c0, obj_class, NULL, 0,
                            &screen->compute);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate compute object: %d\n", ret);
      return ret;
   }

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 0, 1 << 12, NULL,
                        &screen->parm);
   if (ret)
      return ret;

   BEGIN_NVC0(push, SUBC_COMPUTE(NV01_SUBCHAN_OBJECT), 1);
   PUSH_DATA (push, screen->compute->oclass);

   /* hardware limit */
   BEGIN_NVC0(push, NVC0_COMPUTE(MP_LIMIT), 1);
   PUSH_DATA (push, screen->mp_count);
   BEGIN_NVC0(push, NVC0_COMPUTE(CALL_LIMIT_LOG), 1);
   PUSH_DATA (push, 0xf);

   BEGIN_NVC0(push, SUBC_COMPUTE(0x02a0), 1);
   PUSH_DATA (push, 0x8000);

   /* global memory setup */
   BEGIN_NVC0(push, SUBC_COMPUTE(0x02c4), 1);
   PUSH_DATA (push, 0);
   BEGIN_NIC0(push, NVC0_COMPUTE(GLOBAL_BASE), 0x100);
   for (i = 0; i <= 0xff; i++)
      PUSH_DATA (push, (0xc << 28) | (i << 16) | i);
   BEGIN_NVC0(push, SUBC_COMPUTE(0x02c4), 1);
   PUSH_DATA (push, 1);

   /* local memory and cstack setup */
   BEGIN_NVC0(push, NVC0_COMPUTE(TEMP_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->tls->offset);
   PUSH_DATA (push, screen->tls->offset);
   BEGIN_NVC0(push, NVC0_COMPUTE(TEMP_SIZE_HIGH), 2);
   PUSH_DATAh(push, screen->tls->size);
   PUSH_DATA (push, screen->tls->size);
   BEGIN_NVC0(push, NVC0_COMPUTE(WARP_TEMP_ALLOC), 1);
   PUSH_DATA (push, 0);
   BEGIN_NVC0(push, NVC0_COMPUTE(LOCAL_BASE), 1);
   PUSH_DATA (push, 1 << 24);

   /* shared memory setup */
   BEGIN_NVC0(push, NVC0_COMPUTE(CACHE_SPLIT), 1);
   PUSH_DATA (push, NVC0_COMPUTE_CACHE_SPLIT_48K_SHARED_16K_L1);
   BEGIN_NVC0(push, NVC0_COMPUTE(SHARED_BASE), 1);
   PUSH_DATA (push, 2 << 24);
   BEGIN_NVC0(push, NVC0_COMPUTE(SHARED_SIZE), 1);
   PUSH_DATA (push, 0);

   /* code segment setup */
   BEGIN_NVC0(push, NVC0_COMPUTE(CODE_ADDRESS_HIGH), 2);
   PUSH_DATAh(push, screen->text->offset);
   PUSH_DATA (push, screen->text->offset);

   /* bind parameters buffer */
   BEGIN_NVC0(push, NVC0_COMPUTE(CB_SIZE), 3);
   PUSH_DATA (push, screen->parm->size);
   PUSH_DATAh(push, screen->parm->offset);
   PUSH_DATA (push, screen->parm->offset);
   BEGIN_NVC0(push, NVC0_COMPUTE(CB_BIND), 1);
   PUSH_DATA (push, (0 << 8) | 1);

   /* TODO: textures & samplers */

   return 0;
}
Example #25
struct pipe_screen *
nvfx_screen_create(struct nouveau_device *dev)
{
	static const unsigned query_sizes[] = {(4096 - 4 * 32) / 32, 3 * 1024 / 32, 2 * 1024 / 32, 1024 / 32};
	struct nvfx_screen *screen = CALLOC_STRUCT(nvfx_screen);
	struct nouveau_channel *chan;
	struct pipe_screen *pscreen;
	unsigned eng3d_class = 0;
	int ret, i;

	if (!screen)
		return NULL;

	pscreen = &screen->base.base;

	ret = nouveau_screen_init(&screen->base, dev);
	if (ret) {
		nvfx_screen_destroy(pscreen);
		return NULL;
	}
	chan = screen->base.channel;
	screen->cur_ctx = NULL;
	chan->user_private = screen;
	chan->flush_notify = nvfx_channel_flush_notify;

	pscreen->destroy = nvfx_screen_destroy;
	pscreen->get_param = nvfx_screen_get_param;
	pscreen->get_shader_param = nvfx_screen_get_shader_param;
	pscreen->get_paramf = nvfx_screen_get_paramf;
	pscreen->get_video_param = nvfx_screen_get_video_param;
	pscreen->is_format_supported = nvfx_screen_is_format_supported;
	pscreen->is_video_format_supported = vl_video_buffer_is_format_supported;
	pscreen->context_create = nvfx_create;

	ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 0, 4096, &screen->fence);
	if (ret) {
		nvfx_screen_destroy(pscreen);
		return NULL;
	}

	switch (dev->chipset & 0xf0) {
	case 0x30:
		if (NV30_3D_CHIPSET_3X_MASK & (1 << (dev->chipset & 0x0f)))
			eng3d_class = NV30_3D;
		else if (NV34_3D_CHIPSET_3X_MASK & (1 << (dev->chipset & 0x0f)))
			eng3d_class = NV34_3D;
		else if (NV35_3D_CHIPSET_3X_MASK & (1 << (dev->chipset & 0x0f)))
			eng3d_class = NV35_3D;
		break;
	case 0x40:
		if (NV4X_GRCLASS4097_CHIPSETS & (1 << (dev->chipset & 0x0f)))
			eng3d_class = NV40_3D;
		else if (NV4X_GRCLASS4497_CHIPSETS & (1 << (dev->chipset & 0x0f)))
			eng3d_class = NV44_3D;
		screen->is_nv4x = ~0;
		break;
	case 0x60:
		if (NV6X_GRCLASS4497_CHIPSETS & (1 << (dev->chipset & 0x0f)))
			eng3d_class = NV44_3D;
		screen->is_nv4x = ~0;
		break;
	}

	if (!eng3d_class) {
		NOUVEAU_ERR("Unknown nv3x/nv4x chipset: nv%02x\n", dev->chipset);
		nvfx_screen_destroy(pscreen);
		return NULL;
	}

	screen->advertise_npot = !!screen->is_nv4x;
	screen->advertise_blend_equation_separate = !!screen->is_nv4x;
	screen->use_nv4x = screen->is_nv4x;

	if(screen->is_nv4x) {
		if(debug_get_bool_option("NVFX_SIMULATE_NV30", FALSE))
			screen->use_nv4x = 0;
		if(!debug_get_bool_option("NVFX_NPOT", TRUE))
			screen->advertise_npot = 0;
		if(!debug_get_bool_option("NVFX_BLEND_EQ_SEP", TRUE))
			screen->advertise_blend_equation_separate = 0;
	}

	screen->force_swtnl = debug_get_bool_option("NVFX_SWTNL", FALSE);
	screen->trace_draw = debug_get_bool_option("NVFX_TRACE_DRAW", FALSE);

	screen->buffer_allocation_cost = debug_get_num_option("NVFX_BUFFER_ALLOCATION_COST", 16384);
	screen->inline_cost_per_hardware_cost = atof(debug_get_option("NVFX_INLINE_COST_PER_HARDWARE_COST", "1.0"));
	screen->static_reuse_threshold = atof(debug_get_option("NVFX_STATIC_REUSE_THRESHOLD", "2.0"));

	/* We don't advertise these by default because filtering and blending doesn't work as
	 * it should, due to several restrictions.
	 * The only exception is fp16 on nv40.
	 */
	screen->advertise_fp16 = debug_get_bool_option("NVFX_FP16", !!screen->use_nv4x);
	screen->advertise_fp32 = debug_get_bool_option("NVFX_FP32", 0);

	screen->vertex_buffer_reloc_flags = nvfx_screen_get_vertex_buffer_flags(screen);

	/* surely both nv3x and nv44 support index buffers too: find out how and test that */
	if(eng3d_class == NV40_3D)
		screen->index_buffer_reloc_flags = screen->vertex_buffer_reloc_flags;

	if(!screen->force_swtnl && screen->vertex_buffer_reloc_flags == screen->index_buffer_reloc_flags)
		screen->base.vertex_buffer_flags = screen->base.index_buffer_flags = screen->vertex_buffer_reloc_flags;

	nvfx_screen_init_resource_functions(pscreen);

	ret = nouveau_grobj_alloc(chan, 0xbeef3097, eng3d_class, &screen->eng3d);
	if (ret) {
		NOUVEAU_ERR("Error creating 3D object: %d\n", ret);
		nvfx_screen_destroy(pscreen);
		return NULL;
	}

	/* 2D engine setup */
	nvfx_screen_surface_init(pscreen);

	/* Notifier for sync purposes */
	ret = nouveau_notifier_alloc(chan, 0xbeef0301, 1, &screen->sync);
	if (ret) {
		NOUVEAU_ERR("Error creating notifier object: %d\n", ret);
		nvfx_screen_destroy(pscreen);
		return NULL;
	}

	/* Query objects */
	for(i = 0; i < sizeof(query_sizes) / sizeof(query_sizes[0]); ++i)
	{
		ret = nouveau_notifier_alloc(chan, 0xbeef0302, query_sizes[i], &screen->query);
		if(!ret)
			break;
	}

	if (ret) {
		NOUVEAU_ERR("Error initialising query objects: %d\n", ret);
		nvfx_screen_destroy(pscreen);
		return NULL;
	}

	ret = nouveau_resource_init(&screen->query_heap, 0, query_sizes[i]);
	if (ret) {
		NOUVEAU_ERR("Error initialising query object heap: %d\n", ret);
		nvfx_screen_destroy(pscreen);
		return NULL;
	}

	LIST_INITHEAD(&screen->query_list);

	/* Vtxprog resources */
	if (nouveau_resource_init(&screen->vp_exec_heap, 0, screen->use_nv4x ? 512 : 256) ||
	    nouveau_resource_init(&screen->vp_data_heap, 0, screen->use_nv4x ? 468 : 256)) {
		nvfx_screen_destroy(pscreen);
		return NULL;
	}

	BIND_RING(chan, screen->eng3d, 7);

	/* Static eng3d initialisation */
	/* note that we just started using the channel, so we must have space in the pushbuffer */
	BEGIN_RING(chan, screen->eng3d, NV30_3D_DMA_NOTIFY, 1);
	OUT_RING(chan, screen->sync->handle);
	BEGIN_RING(chan, screen->eng3d, NV30_3D_DMA_TEXTURE0, 2);
	OUT_RING(chan, chan->vram->handle);
	OUT_RING(chan, chan->gart->handle);
	BEGIN_RING(chan, screen->eng3d, NV30_3D_DMA_COLOR1, 1);
	OUT_RING(chan, chan->vram->handle);
	BEGIN_RING(chan, screen->eng3d, NV30_3D_DMA_COLOR0, 2);
	OUT_RING(chan, chan->vram->handle);
	OUT_RING(chan, chan->vram->handle);
	BEGIN_RING(chan, screen->eng3d, NV30_3D_DMA_VTXBUF0, 2);
	OUT_RING(chan, chan->vram->handle);
	OUT_RING(chan, chan->gart->handle);

	BEGIN_RING(chan, screen->eng3d, NV30_3D_DMA_FENCE, 2);
	OUT_RING(chan, 0);
	OUT_RING(chan, screen->query->handle);

	BEGIN_RING(chan, screen->eng3d, NV30_3D_DMA_UNK1AC, 2);
	OUT_RING(chan, chan->vram->handle);
	OUT_RING(chan, chan->vram->handle);

	if(!screen->is_nv4x)
		nv30_screen_init(screen);
	else
		nv40_screen_init(screen);

	return pscreen;
}
Example #26
GLboolean
nouveau_context_create(gl_api api,
		       const struct gl_config *visual, __DRIcontext *dri_ctx,
		       unsigned major_version,
		       unsigned minor_version,
		       uint32_t flags,
		       unsigned *error,
		       void *share_ctx)
{
	__DRIscreen *dri_screen = dri_ctx->driScreenPriv;
	struct nouveau_screen *screen = dri_screen->driverPrivate;
	struct nouveau_context *nctx;
	struct gl_context *ctx;

	switch (api) {
	case API_OPENGL:
		/* Do after-the-fact version checking (below).
		 */
		break;
	case API_OPENGLES:
		/* NV10 and NV20 can support OpenGL ES 1.0 only.  Older chips
		 * cannot do even that.
		 */
		if ((screen->device->chipset & 0xf0) == 0x00) {
			*error = __DRI_CTX_ERROR_BAD_API;
			return GL_FALSE;
		} else if (minor_version != 0) {
			*error = __DRI_CTX_ERROR_BAD_VERSION;
			return GL_FALSE;
		}
		break;
	case API_OPENGLES2:
	case API_OPENGL_CORE:
		*error = __DRI_CTX_ERROR_BAD_API;
		return GL_FALSE;
	}

	/* API and flag filtering is handled in dri2CreateContextAttribs.
	 */
	(void) flags;

	ctx = screen->driver->context_create(screen, visual, share_ctx);
	if (!ctx) {
		*error = __DRI_CTX_ERROR_NO_MEMORY;
		return GL_FALSE;
	}

	nctx = to_nouveau_context(ctx);
	nctx->dri_context = dri_ctx;
	dri_ctx->driverPrivate = ctx;

	_mesa_compute_version(ctx);
	if (ctx->Version < major_version * 10 + minor_version) {
	   nouveau_context_destroy(dri_ctx);
	   *error = __DRI_CTX_ERROR_BAD_VERSION;
	   return GL_FALSE;
	}

	if (nouveau_bo_new(context_dev(ctx), NOUVEAU_BO_VRAM, 0, 4096,
			   NULL, &nctx->fence)) {
		nouveau_context_destroy(dri_ctx);
		*error = __DRI_CTX_ERROR_NO_MEMORY;
		return GL_FALSE;
	}

	*error = __DRI_CTX_ERROR_SUCCESS;
	return GL_TRUE;
}
Example #27
void *
nvc0_miptree_transfer_map(struct pipe_context *pctx,
                          struct pipe_resource *res,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct nvc0_context *nvc0 = nvc0_context(pctx);
   struct nouveau_device *dev = nvc0->screen->base.device;
   struct nv50_miptree *mt = nv50_miptree(res);
   struct nvc0_transfer *tx;
   uint32_t size;
   int ret;
   unsigned flags = 0;

   if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return NULL;

   tx = CALLOC_STRUCT(nvc0_transfer);
   if (!tx)
      return NULL;

   pipe_resource_reference(&tx->base.resource, res);

   tx->base.level = level;
   tx->base.usage = usage;
   tx->base.box = *box;

   if (util_format_is_plain(res->format)) {
      tx->nblocksx = box->width << mt->ms_x;
      tx->nblocksy = box->height << mt->ms_y;
   } else {
      tx->nblocksx = util_format_get_nblocksx(res->format, box->width);
      tx->nblocksy = util_format_get_nblocksy(res->format, box->height);
   }
   tx->nlayers = box->depth;

   tx->base.stride = tx->nblocksx * util_format_get_blocksize(res->format);
   tx->base.layer_stride = tx->nblocksy * tx->base.stride;

   nv50_m2mf_rect_setup(&tx->rect[0], res, level, box->x, box->y, box->z);

   size = tx->base.layer_stride;

   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
                        size * tx->nlayers, NULL, &tx->rect[1].bo);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      FREE(tx);
      return NULL;
   }

   tx->rect[1].cpp = tx->rect[0].cpp;
   tx->rect[1].width = tx->nblocksx;
   tx->rect[1].height = tx->nblocksy;
   tx->rect[1].depth = 1;
   tx->rect[1].pitch = tx->base.stride;
   tx->rect[1].domain = NOUVEAU_BO_GART;

   if (usage & PIPE_TRANSFER_READ) {
      unsigned base = tx->rect[0].base;
      unsigned z = tx->rect[0].z;
      unsigned i;
      for (i = 0; i < tx->nlayers; ++i) {
         nvc0->m2mf_copy_rect(nvc0, &tx->rect[1], &tx->rect[0],
                              tx->nblocksx, tx->nblocksy);
         if (mt->layout_3d)
            tx->rect[0].z++;
         else
            tx->rect[0].base += mt->layer_stride;
         tx->rect[1].base += size;
      }
      tx->rect[0].z = z;
      tx->rect[0].base = base;
      tx->rect[1].base = 0;
   }

   if (tx->rect[1].bo->map) {
      *ptransfer = &tx->base;
      return tx->rect[1].bo->map;
   }

   if (usage & PIPE_TRANSFER_READ)
      flags = NOUVEAU_BO_RD;
   if (usage & PIPE_TRANSFER_WRITE)
      flags |= NOUVEAU_BO_WR;

   ret = nouveau_bo_map(tx->rect[1].bo, flags, nvc0->screen->base.client);
   if (ret) {
      pipe_resource_reference(&tx->base.resource, NULL);
      nouveau_bo_ref(NULL, &tx->rect[1].bo);
      FREE(tx);
      return NULL;
   }

   *ptransfer = &tx->base;
   return tx->rect[1].bo->map;
}
Example #28
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
		     u32 size, struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)device->object.client;
	struct nvkm_mmu *mmu = nvxx_mmu(device);
	struct nv_dma_v0 args = {};
	struct nouveau_channel *chan;
	u32 target;
	int ret;

	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->device = device;
	chan->drm = drm;

	/* allocate memory for dma push buffer */
	target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
	if (nouveau_vram_pushbuf)
		target = TTM_PL_FLAG_VRAM;

	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
			    &chan->push.buffer);
	if (ret == 0) {
		ret = nouveau_bo_pin(chan->push.buffer, target, false);
		if (ret == 0)
			ret = nouveau_bo_map(chan->push.buffer);
	}

	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	/* create dma object covering the *entire* memory space that the
	 * pushbuf lives in; this is because the GEM code requires that
	 * we be able to call out to other (indirect) push buffers
	 */
	chan->push.vma.offset = chan->push.buffer->bo.offset;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
					&chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
		}

		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start = 0;
		args.limit = cli->vm->mmu->limit - 1;
	} else
	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
			/* nv04 vram pushbuf hack, retarget to its location in
			 * the framebuffer bar rather than direct vram access..
			 * nfi why this exists, it came from the -nv ddx.
			 */
			args.target = NV_DMA_V0_TARGET_PCI;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = nvxx_device(device)->func->
				resource_addr(nvxx_device(device), 1);
			args.limit = args.start + device->info.ram_user - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}
	} else {
		if (chan->drm->agp.bridge) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = mmu->limit - 1;
		}
	}

	ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
			       &args, sizeof(args), &chan->push.ctxdma);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}
Example #29
struct pipe_resource *
nv30_miptree_create(struct pipe_screen *pscreen,
                    const struct pipe_resource *tmpl)
{
   struct nouveau_device *dev = nouveau_screen(pscreen)->device;
   struct nv30_miptree *mt = CALLOC_STRUCT(nv30_miptree);
   struct pipe_resource *pt = &mt->base.base;
   unsigned blocksz, size;
   unsigned w, h, d, l;
   int ret;

   switch (tmpl->nr_samples) {
   case 4:
      mt->ms_mode = 0x00004000;
      mt->ms_x = 1;
      mt->ms_y = 1;
      break;
   case 2:
      mt->ms_mode = 0x00003000;
      mt->ms_x = 1;
      mt->ms_y = 0;
      break;
   default:
      mt->ms_mode = 0x00000000;
      mt->ms_x = 0;
      mt->ms_y = 0;
      break;
   }

   mt->base.vtbl = &nv30_miptree_vtbl;
   *pt = *tmpl;
   pipe_reference_init(&pt->reference, 1);
   pt->screen = pscreen;

   w = pt->width0 << mt->ms_x;
   h = pt->height0 << mt->ms_y;
   d = (pt->target == PIPE_TEXTURE_3D) ? pt->depth0 : 1;
   blocksz = util_format_get_blocksize(pt->format);

   if ((pt->target == PIPE_TEXTURE_RECT) ||
       !util_is_power_of_two(pt->width0) ||
       !util_is_power_of_two(pt->height0) ||
       !util_is_power_of_two(pt->depth0) ||
       util_format_is_compressed(pt->format) ||
       util_format_is_float(pt->format) || mt->ms_mode) {
      mt->uniform_pitch = util_format_get_nblocksx(pt->format, w) * blocksz;
      mt->uniform_pitch = align(mt->uniform_pitch, 64);
   }

   if (!mt->uniform_pitch)
      mt->swizzled = TRUE;

   size = 0;
   for (l = 0; l <= pt->last_level; l++) {
      struct nv30_miptree_level *lvl = &mt->level[l];
      unsigned nbx = util_format_get_nblocksx(pt->format, w);
      unsigned nby = util_format_get_nblocksy(pt->format, h);

      lvl->offset = size;
      lvl->pitch  = mt->uniform_pitch;
      if (!lvl->pitch)
         lvl->pitch = nbx * blocksz;

      lvl->zslice_size = lvl->pitch * nby;
      size += lvl->zslice_size * d;

      w = u_minify(w, 1);
      h = u_minify(h, 1);
      d = u_minify(d, 1);
   }

   mt->layer_size = size;
   if (pt->target == PIPE_TEXTURE_CUBE) {
      if (!mt->uniform_pitch)
         mt->layer_size = align(mt->layer_size, 128);
      size = mt->layer_size * 6;
   }

   ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM, 256, size, NULL, &mt->base.bo);
   if (ret) {
      FREE(mt);
      return NULL;
   }

   mt->base.domain = NOUVEAU_BO_VRAM;
   return &mt->base.base;
}
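
To make the size loop above concrete, a worked example under stated assumptions: a 256x256 RGBA8 2D texture with a full mip chain, where uniform_pitch stays 0 and each level's pitch is nbx * blocksz (blocksz = 4):

/* level 0: pitch = 256 * 4 = 1024, zslice_size = 1024 * 256 = 262144
 * level 1: pitch = 128 * 4 =  512, zslice_size =  512 * 128 =  65536
 * ... every level is a quarter of the one before it; summing the whole
 * chain down to 1x1 gives 349524 bytes, just under 4/3 of level 0.
 */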
Example #30
struct pipe_screen *
nv50_screen_create(struct nouveau_device *dev)
{
   struct nv50_screen *screen;
   struct pipe_screen *pscreen;
   struct nouveau_object *chan;
   uint64_t value;
   uint32_t tesla_class;
   unsigned stack_size;
   int ret;

   screen = CALLOC_STRUCT(nv50_screen);
   if (!screen)
      return NULL;
   pscreen = &screen->base.base;

   ret = nouveau_screen_init(&screen->base, dev);
   if (ret) {
      NOUVEAU_ERR("nouveau_screen_init failed: %d\n", ret);
      goto fail;
   }

   /* TODO: Prevent FIFO prefetch before transfer of index buffers and
    *  admit them to VRAM.
    */
   screen->base.vidmem_bindings |= PIPE_BIND_CONSTANT_BUFFER |
      PIPE_BIND_VERTEX_BUFFER;
   screen->base.sysmem_bindings |=
      PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER;

   screen->base.pushbuf->user_priv = screen;
   screen->base.pushbuf->rsvd_kick = 5;

   chan = screen->base.channel;

   pscreen->destroy = nv50_screen_destroy;
   pscreen->context_create = nv50_create;
   pscreen->is_format_supported = nv50_screen_is_format_supported;
   pscreen->get_param = nv50_screen_get_param;
   pscreen->get_shader_param = nv50_screen_get_shader_param;
   pscreen->get_paramf = nv50_screen_get_paramf;

   nv50_screen_init_resource_functions(pscreen);

   if (screen->base.device->chipset < 0x84 ||
       debug_get_bool_option("NOUVEAU_PMPEG", FALSE)) {
      /* PMPEG */
      nouveau_screen_init_vdec(&screen->base);
   } else if (screen->base.device->chipset < 0x98 ||
              screen->base.device->chipset == 0xa0) {
      /* VP2 */
      screen->base.base.get_video_param = nv84_screen_get_video_param;
      screen->base.base.is_video_format_supported = nv84_screen_video_supported;
   } else {
      /* VP3/4 */
      screen->base.base.get_video_param = nouveau_vp3_screen_get_video_param;
      screen->base.base.is_video_format_supported = nouveau_vp3_screen_video_supported;
   }

   ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0, 4096,
                        NULL, &screen->fence.bo);
   if (ret) {
      NOUVEAU_ERR("Failed to allocate fence bo: %d\n", ret);
      goto fail;
   }

   nouveau_bo_map(screen->fence.bo, 0, NULL);
   screen->fence.map = screen->fence.bo->map;
   screen->base.fence.emit = nv50_screen_fence_emit;
   screen->base.fence.update = nv50_screen_fence_update;

   ret = nouveau_object_new(chan, 0xbeef0301, NOUVEAU_NOTIFIER_CLASS,
                            &(struct nv04_notify){ .length = 32 },
                            sizeof(struct nv04_notify), &screen->sync);