Example no. 1
0
static void
nvc0_memcpy_pcopy0(struct pscnv_ib_chan *chan, const uint64_t dst_addr,
		 const uint64_t src_addr, const uint32_t size, int flags)
{
	static const uint32_t mode = 0x3110; /* QUERY_SHORT|QUERY|SRC_LINEAR|DST_LINEAR */
	static const uint32_t pitch = 0x8000;
	const uint32_t ycnt = size / pitch;
	const uint32_t rem_size = size - ycnt * pitch;
	
	uint64_t dst_pos = dst_addr;
	uint64_t src_pos = src_addr;
	
	if (flags & PSCNV_DMA_VERBOSE) {
		char size_str[16];
		pscnv_mem_human_readable(size_str, size);
		NV_INFO(chan->dev, "DMA: PCOPY0- copy %s from %llx to %llx\n",
			size_str, src_addr, dst_addr);
	}

	if (ycnt) {
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x30c, 6);
		OUT_RING(chan, src_pos >> 32);  /* SRC_ADDR_HIGH */
		OUT_RING(chan, src_pos);	/* SRC_ADDR_LOW */
		OUT_RING(chan, dst_pos >> 32);  /* DST_ADDR_HIGH */
		OUT_RING(chan, dst_pos);	/* DST_ADDR_LOW */
		OUT_RING(chan, pitch);		/* SRC_PITCH_IN */
		OUT_RING(chan, pitch);		/* DST_PITCH_IN */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x324, 2);
		OUT_RING(chan, pitch);		/* XCNT */
		OUT_RING(chan, ycnt);		/* YCNT */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x300, 1);
		OUT_RING(chan, mode);		/* EXEC */
		FIRE_RING(chan);
	}
	
	dst_pos += ycnt * pitch;
	src_pos += ycnt * pitch;
	
	if (rem_size) {
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x30c, 6);
		OUT_RING(chan, src_pos >> 32);  /* SRC_ADDR_HIGH */
		OUT_RING(chan, src_pos);	/* SRC_ADDR_LOW */
		OUT_RING(chan, dst_pos >> 32);  /* DST_ADDR_HIGH */
		OUT_RING(chan, dst_pos);	/* DST_ADDR_LOW */
		OUT_RING(chan, rem_size);	/* SRC_PITCH_IN */
		OUT_RING(chan, rem_size);	/* DST_PITCH_IN */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x324, 2);
		OUT_RING(chan, rem_size);	/* XCNT */
		OUT_RING(chan, 1);		/* YCNT */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_PCOPY0, 0x300, 1);
		OUT_RING(chan, mode);		/* EXEC */
		FIRE_RING(chan);
	}
}
Example no. 2
void
nouveau_channel_free(struct nouveau_channel **chan)
{
	struct nouveau_channel_priv *nvchan;
	struct nouveau_device_priv *nvdev;
	struct drm_nouveau_channel_free cf;

	if (!chan || !*chan)
		return;
	nvchan = nouveau_channel(*chan);
	*chan = NULL;
	nvdev = nouveau_device(nvchan->base.device);

	FIRE_RING(&nvchan->base);

	nouveau_bo_unmap(nvchan->notifier_bo);
	nouveau_bo_ref(NULL, &nvchan->notifier_bo);

	nouveau_grobj_free(&nvchan->base.vram);
	nouveau_grobj_free(&nvchan->base.gart);
	nouveau_grobj_free(&nvchan->base.nullobj);

	cf.channel = nvchan->drm.channel;
	drmCommandWrite(nvdev->fd, DRM_NOUVEAU_CHANNEL_FREE, &cf, sizeof(cf));
	free(nvchan);
}
Example no. 3
0
boolean
nv30_draw_arrays(struct pipe_context *pipe,
		 unsigned mode, unsigned start, unsigned count)
{
	struct nv30_context *nv30 = nv30_context(pipe);
	struct nouveau_channel *chan = nv30->screen->base.channel;
	unsigned restart = 0;

	nv30_vbo_set_idxbuf(nv30, NULL, 0);
	if (FORCE_SWTNL || !nv30_state_validate(nv30)) {
		/*return nv30_draw_elements_swtnl(pipe, NULL, 0,
						mode, start, count);*/
		return FALSE;
	}

	while (count) {
		unsigned vc, nr;

		nv30_state_emit(nv30);

		vc = nouveau_vbuf_split(chan->pushbuf->remaining, 6, 256,
					mode, start, count, &restart);
		if (!vc) {
			FIRE_RING(NULL);
			continue;
		}

		BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
		OUT_RING  (nvgl_primitive(mode));

		nr = (vc & 0xff);
		if (nr) {
			BEGIN_RING(rankine, NV34TCL_VB_VERTEX_BATCH, 1);
			OUT_RING  (((nr - 1) << 24) | start);
			start += nr;
		}

		nr = vc >> 8;
		while (nr) {
			unsigned push = nr > 2047 ? 2047 : nr;

			nr -= push;

			BEGIN_RING_NI(rankine, NV34TCL_VB_VERTEX_BATCH, push);
			while (push--) {
				OUT_RING(((0x100 - 1) << 24) | start);
				start += 0x100;
			}
		}

		BEGIN_RING(rankine, NV34TCL_VERTEX_BEGIN_END, 1);
		OUT_RING  (0);

		count -= vc;
		start = restart;
	}

	pipe->flush(pipe, 0, NULL);
	return TRUE;
}
Example no. 4
0
void
nouveau_channel_free(struct nouveau_channel **chan)
{
	struct nouveau_channel_priv *nvchan;
	struct nouveau_device_priv *nvdev;
	struct drm_nouveau_channel_free cf;
	unsigned i;

	if (!chan || !*chan)
		return;
	nvchan = nouveau_channel(*chan);
	(*chan)->flush_notify = NULL;
	*chan = NULL;
	nvdev = nouveau_device(nvchan->base.device);

	FIRE_RING(&nvchan->base);

	nouveau_pushbuf_fini(&nvchan->base);
	if (nvchan->notifier_bo) {
		nouveau_bo_unmap(nvchan->notifier_bo);
		nouveau_bo_ref(NULL, &nvchan->notifier_bo);
	}

	for (i = 0; i < nvchan->drm.nr_subchan; i++)
		free(nvchan->base.subc[i].gr);

	nouveau_grobj_free(&nvchan->base.vram);
	nouveau_grobj_free(&nvchan->base.gart);
	nouveau_grobj_free(&nvchan->base.nullobj);

	cf.channel = nvchan->drm.channel;
	drmCommandWrite(nvdev->fd, DRM_NOUVEAU_CHANNEL_FREE, &cf, sizeof(cf));
	free(nvchan);
}
Example no. 5
0
static boolean
nv04_init_hwctx(struct nv04_context *nv04)
{
	// requires a valid handle
//	BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_NOTIFY, 1);
//	OUT_RING(0);
	BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_NOP, 1);
	OUT_RING(0);

	BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_CONTROL, 1);
	OUT_RING(0x40182800);
//	OUT_RING(1<<20/*no cull*/);
	BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_BLEND, 1);
//	OUT_RING(0x24|(1<<6)|(1<<8));
	OUT_RING(0x120001a4);
	BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_FORMAT, 1);
	OUT_RING(0x332213a1);
	BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_FILTER, 1);
	OUT_RING(0x11001010);
	BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_COLORKEY, 1);
	OUT_RING(0x0);
//	BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_OFFSET, 1);
//	OUT_RING(SCREEN_OFFSET);
	BEGIN_RING(fahrenheit, NV04_DX5_TEXTURED_TRIANGLE_FOGCOLOR, 1);
	OUT_RING(0xff000000);



	FIRE_RING (NULL);
	return TRUE;
}
Example no. 6
0
static void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
		nouveau_fbcon_gpu_lockup(info);
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, rect);
		return;
	}

	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
	OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
	BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
	else
		OUT_RING(chan, rect->color);
	BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
	OUT_RING(chan, (rect->dx << 16) | rect->dy);
	OUT_RING(chan, (rect->width << 16) | rect->height);
	FIRE_RING(chan);
}
Example no. 7
0
static void
nvc0_memcpy_m2mf(struct pscnv_ib_chan *chan, const uint64_t dst_addr,
		 const uint64_t src_addr, const uint32_t size, int flags)
{
	/* MODE 1 means fire fence */
	static const uint32_t mode1 = 0x102110; /* QUERY_SHORT|QUERY_YES|SRC_LINEAR|DST_LINEAR */
	static const uint32_t mode2 = 0x100110; /* QUERY_SHORT|SRC_LINEAR|DST_LINEAR */
	static const uint32_t page_size = PSCNV_MEM_PAGE_SIZE;
	const uint32_t page_count = size / page_size;
	const uint32_t rem_size = size - page_size * page_count;
	
	uint64_t dst_pos = dst_addr;
	uint64_t src_pos = src_addr;
	uint32_t pages_left = page_count;
	
	if (flags & PSCNV_DMA_VERBOSE) {
		char size_str[16];
		pscnv_mem_human_readable(size_str, size);
		NV_INFO(chan->dev, "DMA: M2MF- copy %s from %llx to %llx\n",
			size_str, src_addr, dst_addr);
	}

	while (pages_left) {
		int line_count = (pages_left > 2047) ? 2047 : pages_left;
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x238, 2);
		OUT_RING(chan, dst_pos >> 32); /* OFFSET_OUT_HIGH */
		OUT_RING(chan, dst_pos); /* OFFSET_OUT_LOW */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x30c, 6);
		OUT_RING(chan, src_pos >> 32); /* OFFSET_IN_HIGH */
		OUT_RING(chan, src_pos); /* OFFSET_IN_LOW */
		OUT_RING(chan, page_size); /* SRC_PITCH_IN */
		OUT_RING(chan, page_size); /* DST_PITCH_IN */
		OUT_RING(chan, page_size); /* LINE_LENGTH_IN */
		OUT_RING(chan, line_count); /* LINE_COUNT */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x300, 1);
		if (pages_left == line_count && rem_size == 0)
			OUT_RING(chan, mode1); /* EXEC */
		else
			OUT_RING(chan, mode2); /* EXEC */
		pages_left -= line_count;
		dst_pos += (page_size * line_count);
		src_pos += (page_size * line_count);
	}
	if (rem_size) {
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x238, 2);
		OUT_RING(chan, dst_pos >> 32); /* OFFSET_OUT_HIGH */
		OUT_RING(chan, dst_pos); /* OFFSET_OUT_LOW */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x30c, 6);
		OUT_RING(chan, src_pos >> 32); /* OFFSET_IN_HIGH */
		OUT_RING(chan, src_pos); /* OFFSET_IN_LOW */
		OUT_RING(chan, rem_size); /* SRC_PITCH_IN */
		OUT_RING(chan, rem_size); /* DST_PITCH_IN */
		OUT_RING(chan, rem_size); /* LINE_LENGTH_IN */
		OUT_RING(chan, 1); /* LINE_COUNT */
		BEGIN_NVC0(chan, GDEV_SUBCH_NV_M2MF, 0x300, 1);
		OUT_RING(chan, mode1); /* EXEC */
	}

	FIRE_RING(chan);
}
Example no. 8
0
static void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
		nouveau_fbcon_gpu_lockup(info);
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_copyarea(info, region);
		return;
	}

	BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
	OUT_RING(chan, (region->sy << 16) | region->sx);
	OUT_RING(chan, (region->dy << 16) | region->dx);
	OUT_RING(chan, (region->height << 16) | region->width);
	FIRE_RING(chan);
}
Example no. 9
0
static void
nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int ret;

	NV_DEBUG(drm, "\n");

	if (update && nv_crtc->cursor.visible)
		return;

	ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
	if (ret) {
		NV_ERROR(drm, "no space while unhiding cursor\n");
		return;
	}

	if (nv_device(drm->device)->chipset != 0x50) {
		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NvEvoVRAM);
	}
	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
	OUT_RING(evo, nv_crtc->cursor.offset >> 8);

	if (update) {
		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = true;
	}
}
Example no. 10
0
static void
nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct drm_device *dev = nv_crtc->base.dev;
	int ret;

	NV_DEBUG_KMS(dev, "\n");

	if (update && !nv_crtc->cursor.visible)
		return;

	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
	if (ret) {
		NV_ERROR(dev, "no space while hiding cursor\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
	OUT_RING(evo, 0);
	if (dev_priv->chipset != 0x50) {
		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
	}

	if (update) {
		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = false;
	}
}
Example no. 11
0
static int
nouveau_card_channel_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	int ret, oclass;

	ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
	dev_priv->channel = chan;
	if (ret)
		return ret;

	mutex_unlock(&dev_priv->channel->mutex);

	if (dev_priv->card_type <= NV_50) {
		if (dev_priv->card_type < NV_50)
			oclass = 0x0039;
		else
			oclass = 0x5039;

		ret = nouveau_gpuobj_gr_new(chan, NvM2MF, oclass);
		if (ret)
			goto error;

		ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
					     &chan->m2mf_ntfy);
		if (ret)
			goto error;

		ret = RING_SPACE(chan, 6);
		if (ret)
			goto error;

		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
		OUT_RING  (chan, NvM2MF);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
		OUT_RING  (chan, NvNotify0);
		OUT_RING  (chan, chan->vram_handle);
		OUT_RING  (chan, chan->gart_handle);
	} else
	if (dev_priv->card_type <= NV_D0) {
		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
		if (ret)
			goto error;

		ret = RING_SPACE(chan, 2);
		if (ret)
			goto error;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
		OUT_RING  (chan, 0x00009039);
	}

	FIRE_RING (chan);
error:
	if (ret)
		nouveau_card_channel_fini(dev);
	return ret;
}
Example no. 12
0
static void
nouveauSpanRenderStart( GLcontext *ctx )
{
   nouveauContextPtr nmesa = NOUVEAU_CONTEXT(ctx);
   FIRE_RING();
   LOCK_HARDWARE(nmesa);
   nouveauWaitForIdleLocked( nmesa );
}
Example no. 13
0
static void
nv50_flush(struct pipe_context *pipe, unsigned flags,
	   struct pipe_fence_handle **fence)
{
	struct nv50_context *nv50 = (struct nv50_context *)pipe;
	
	FIRE_RING(nv50->screen->nvws->channel);
}
Example no. 14
0
static int
nouveau_fbcon_sync(struct fb_info *info)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	int ret, i;

	if (!chan || !chan->accel_done || in_interrupt() ||
	    info->state != FBINFO_STATE_RUNNING ||
	    info->flags & FBINFO_HWACCEL_DISABLED)
		return 0;

	if (!mutex_trylock(&chan->mutex))
		return 0;

	ret = RING_SPACE(chan, 4);
	if (ret) {
		mutex_unlock(&chan->mutex);
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	if (dev_priv->card_type >= NV_C0) {
		BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
		OUT_RING  (chan, 0);
		BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
		OUT_RING  (chan, 0);
	} else {
		BEGIN_RING(chan, 0, 0x0104, 1);
		OUT_RING  (chan, 0);
		BEGIN_RING(chan, 0, 0x0100, 1);
		OUT_RING  (chan, 0);
	}

	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
	FIRE_RING(chan);
	mutex_unlock(&chan->mutex);

	ret = -EBUSY;
	for (i = 0; i < 100000; i++) {
		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
			ret = 0;
			break;
		}
		DRM_UDELAY(1);
	}

	if (ret) {
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	chan->accel_done = false;
	return 0;
}
Example no. 15
0
static void
nv20_flush(struct pipe_context *pipe, unsigned flags,
	   struct pipe_fence_handle **fence)
{
	struct nv20_context *nv20 = nv20_context(pipe);

	draw_flush(nv20->draw);

	FIRE_RING(fence);
}
Example no. 16
0
static void
nv50_flush(struct pipe_context *pipe, unsigned flags,
	   struct pipe_fence_handle **fence)
{
	struct nv50_context *nv50 = nv50_context(pipe);
	struct nouveau_channel *chan = nv50->screen->base.channel;

	if (flags & PIPE_FLUSH_FRAME)
		FIRE_RING(chan);
}
Example no. 17
0
static void
nv40_query_end(struct pipe_context *pipe, struct pipe_query *pq)
{
	struct nv40_context *nv40 = nv40_context(pipe);
	struct nv40_query *q = nv40_query(pq);

	BEGIN_RING(curie, NV40TCL_QUERY_GET, 1);
	OUT_RING  ((0x01 << NV40TCL_QUERY_GET_UNK24_SHIFT) |
		   ((q->object->start * 32) << NV40TCL_QUERY_GET_OFFSET_SHIFT));
	FIRE_RING(NULL);
}
Example no. 18
0
inline void nv10FinishPrimitive(struct nouveau_context *nmesa)
{
	if ((nmesa->screen->card->type>=NV_10) && (nmesa->screen->card->type<=NV_17))
		BEGIN_RING_SIZE(NvSub3D,NV10_TCL_PRIMITIVE_3D_BEGIN_END,1);
	else if (nmesa->screen->card->type==NV_20)
		BEGIN_RING_SIZE(NvSub3D,NV20_TCL_PRIMITIVE_3D_BEGIN_END,1);
	else
		BEGIN_RING_SIZE(NvSub3D,NV30_TCL_PRIMITIVE_3D_BEGIN_END,1);
	OUT_RING(0x0);
	FIRE_RING();
}
Example no. 19
0
static void
nvc0_m2mf_push_rect(struct pipe_screen *pscreen,
                    const struct nv50_m2mf_rect *dst,
                    const void *data,
                    unsigned nblocksx, unsigned nblocksy)
{
   /* assumption: the channel is taken from the screen; its initialization was elided in this excerpt */
   struct nouveau_channel *chan = nouveau_screen(pscreen)->channel;
   const uint8_t *src = (const uint8_t *)data;
   const int cpp = dst->cpp;
   const int line_len = nblocksx * cpp;
   int dy = dst->y;

   assert(nouveau_bo_tile_layout(dst->bo));

   BEGIN_RING(chan, RING_MF(TILING_MODE_OUT), 5);
   OUT_RING  (chan, dst->tile_mode);
   OUT_RING  (chan, dst->width * cpp);
   OUT_RING  (chan, dst->height);
   OUT_RING  (chan, dst->depth);
   OUT_RING  (chan, dst->z);

   while (nblocksy) {
      int line_count, words;
      int size = MIN2(AVAIL_RING(chan), NV04_PFIFO_MAX_PACKET_LEN);

      /* the methods emitted below take 12 words, plus at least one word of data */
      if (size < (12 + 1)) {
         FIRE_RING(chan);
         continue;
      }
      size -= 12;

      line_count = (size * 4) / line_len;
      line_count = MIN2(line_count, (int)nblocksy);
      words = (line_count * line_len + 3) / 4;

      BEGIN_RING(chan, RING_MF(OFFSET_OUT_HIGH), 2);
      OUT_RELOCh(chan, dst->bo, dst->base, dst->domain | NOUVEAU_BO_WR);
      OUT_RELOCl(chan, dst->bo, dst->base, dst->domain | NOUVEAU_BO_WR);

      BEGIN_RING(chan, RING_MF(TILING_POSITION_OUT_X), 2);
      OUT_RING  (chan, dst->x * cpp);
      OUT_RING  (chan, dy);
      BEGIN_RING(chan, RING_MF(LINE_LENGTH_IN), 2);
      OUT_RING  (chan, line_len);
      OUT_RING  (chan, line_count);
      BEGIN_RING(chan, RING_MF(EXEC), 1);
      OUT_RING  (chan, (1 << NVC0_M2MF_EXEC_INC__SHIFT) |
                 NVC0_M2MF_EXEC_PUSH | NVC0_M2MF_EXEC_LINEAR_IN);

      BEGIN_RING_NI(chan, RING_MF(DATA), words);
      OUT_RINGp (chan, src, words);

      dy += line_count;
      src += line_len * line_count;
      nblocksy -= line_count;
   }
}
Example no. 20
0
static int
nv04_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
		OUT_RING  (chan, fence->base.seqno);
		FIRE_RING (chan);
	}
	return ret;
}
Example no. 21
0
int
nv10_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
		OUT_RING  (chan, fence->base.seqno);
		FIRE_RING (chan);
	}
	return ret;
}
Example no. 22
0
int
nv17_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	struct nouveau_cli *cli = (void *)prev->user.client;
	struct nv10_fence_priv *priv = chan->drm->fence;
	struct nv10_fence_chan *fctx = chan->fence;
	u32 value;
	int ret;

	if (!mutex_trylock(&cli->mutex))
		return -EBUSY;

	spin_lock(&priv->lock);
	value = priv->sequence;
	priv->sequence += 2;
	spin_unlock(&priv->lock);

	ret = RING_SPACE(prev, 5);
	if (!ret) {
		BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING  (prev, fctx->sema.handle);
		OUT_RING  (prev, 0);
		OUT_RING  (prev, value + 0);
		OUT_RING  (prev, value + 1);
		FIRE_RING (prev);
	}

	if (!ret && !(ret = RING_SPACE(chan, 5))) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING  (chan, fctx->sema.handle);
		OUT_RING  (chan, 0);
		OUT_RING  (chan, value + 1);
		OUT_RING  (chan, value + 2);
		FIRE_RING (chan);
	}

	mutex_unlock(&cli->mutex);
	return 0;
}
Example no. 23
0
static boolean
nv40_draw_elements_vbo(struct pipe_context *pipe,
		       unsigned mode, unsigned start, unsigned count)
{
	struct nv40_context *nv40 = nv40_context(pipe);
	struct nouveau_channel *chan = nv40->nvws->channel;
	unsigned restart;

	while (count) {
		unsigned nr, vc;

		nv40_state_emit(nv40);

		vc = nouveau_vbuf_split(chan->pushbuf->remaining, 6, 256,
					mode, start, count, &restart);
		if (!vc) {
			FIRE_RING(NULL);
			continue;
		}
		
		BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
		OUT_RING  (nvgl_primitive(mode));

		nr = (vc & 0xff);
		if (nr) {
			BEGIN_RING(curie, NV40TCL_VB_INDEX_BATCH, 1);
			OUT_RING  (((nr - 1) << 24) | start);
			start += nr;
		}

		nr = vc >> 8;
		while (nr) {
			unsigned push = nr > 2047 ? 2047 : nr;

			nr -= push;

			BEGIN_RING_NI(curie, NV40TCL_VB_INDEX_BATCH, push);
			while (push--) {
				OUT_RING(((0x100 - 1) << 24) | start);
				start += 0x100;
			}
		}

		BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
		OUT_RING  (0);

		count -= vc;
		start = restart;
	}

	return TRUE;
}
Example no. 24
0
void
MSMFlushAccel(ScreenPtr pScreen)
{
	ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
	MSMPtr pMsm = MSMPTR(pScrn);
	if (pMsm->xa) {
#ifdef HAVE_XA
		MSMFlushXA(pMsm);
#endif
	} else {
		FIRE_RING(pMsm);
	}
}
Example no. 25
0
static int
nv17_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
	u32 value;
	int ret;

	if (!mutex_trylock(&prev->mutex))
		return -EBUSY;

	spin_lock(&priv->lock);
	value = priv->sequence;
	priv->sequence += 2;
	spin_unlock(&priv->lock);

	ret = RING_SPACE(prev, 5);
	if (!ret) {
		BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING  (prev, NvSema);
		OUT_RING  (prev, 0);
		OUT_RING  (prev, value + 0);
		OUT_RING  (prev, value + 1);
		FIRE_RING (prev);
	}

	if (!ret && !(ret = RING_SPACE(chan, 5))) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING  (chan, NvSema);
		OUT_RING  (chan, 0);
		OUT_RING  (chan, value + 1);
		OUT_RING  (chan, value + 2);
		FIRE_RING (chan);
	}

	mutex_unlock(&prev->mutex);
	return 0;
}
Example no. 26
0
static int
nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
	int ret = RING_SPACE(chan, 5);
	if (ret == 0) {
		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(virtual));
		OUT_RING  (chan, lower_32_bits(virtual));
		OUT_RING  (chan, sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
				 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
		FIRE_RING (chan);
	}
	return ret;
}
Example no. 27
0
static int
nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
		OUT_RING  (chan, upper_32_bits(virtual));
		OUT_RING  (chan, lower_32_bits(virtual));
		OUT_RING  (chan, sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
		OUT_RING  (chan, 0x00000000);
		FIRE_RING (chan);
	}
	return ret;
}
Example no. 28
0
static void
nv30_flush(struct pipe_context *pipe, unsigned flags,
	   struct pipe_fence_handle **fence)
{
	struct nv30_context *nv30 = nv30_context(pipe);

	if (flags & PIPE_FLUSH_TEXTURE_CACHE) {
		BEGIN_RING(rankine, 0x1fd8, 1);
		OUT_RING  (2);
		BEGIN_RING(rankine, 0x1fd8, 1);
		OUT_RING  (1);
	}

	FIRE_RING(fence);
}
Example no. 29
0
static INLINE void
nv40_draw_elements_u16(struct nv40_context *nv40, void *ib,
		       unsigned mode, unsigned start, unsigned count)
{
	struct nouveau_channel *chan = nv40->nvws->channel;

	while (count) {
		uint16_t *elts = (uint16_t *)ib + start;
		unsigned vc, push, restart;

		nv40_state_emit(nv40);

		vc = nouveau_vbuf_split(chan->pushbuf->remaining, 6, 2,
					mode, start, count, &restart);
		if (vc == 0) {
			FIRE_RING(NULL);
			continue;
		}
		count -= vc;

		BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
		OUT_RING  (nvgl_primitive(mode));

		if (vc & 1) {
			BEGIN_RING(curie, NV40TCL_VB_ELEMENT_U32, 1);
			OUT_RING  (elts[0]);
			elts++; vc--;
		}

		while (vc) {
			unsigned i;

			push = MIN2(vc, 2047 * 2);

			BEGIN_RING_NI(curie, NV40TCL_VB_ELEMENT_U16, push >> 1);
			for (i = 0; i < push; i+=2)
				OUT_RING((elts[i+1] << 16) | elts[i]);

			vc -= push;
			elts += push;
		}

		BEGIN_RING(curie, NV40TCL_BEGIN_END, 1);
		OUT_RING  (0);

		start = restart;
	}
}
Example no. 30
0
static int
nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING  (chan, chan->vram);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(virtual));
		OUT_RING  (chan, lower_32_bits(virtual));
		OUT_RING  (chan, sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
		FIRE_RING (chan);
	}
	return ret;
}