Example #1
static int
nouveau_card_channel_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	int ret, oclass;

	ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
	dev_priv->channel = chan;
	if (ret)
		return ret;

	mutex_unlock(&dev_priv->channel->mutex);

	if (dev_priv->card_type <= NV_50) {
		if (dev_priv->card_type < NV_50)
			oclass = 0x0039;
		else
			oclass = 0x5039;

		ret = nouveau_gpuobj_gr_new(chan, NvM2MF, oclass);
		if (ret)
			goto error;

		ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
					     &chan->m2mf_ntfy);
		if (ret)
			goto error;

		ret = RING_SPACE(chan, 6);
		if (ret)
			goto error;

		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
		OUT_RING  (chan, NvM2MF);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
		OUT_RING  (chan, NvNotify0);
		OUT_RING  (chan, chan->vram_handle);
		OUT_RING  (chan, chan->gart_handle);
	} else
	if (dev_priv->card_type <= NV_D0) {
		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
		if (ret)
			goto error;

		ret = RING_SPACE(chan, 2);
		if (ret)
			goto error;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
		OUT_RING  (chan, 0x00009039);
	}

	FIRE_RING (chan);
error:
	if (ret)
		nouveau_card_channel_fini(dev);
	return ret;
}
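The nouveau examples on this page all follow the same submission pattern: reserve pushbuffer space with RING_SPACE, bail out if the reservation fails, emit a method header and its data with BEGIN_RING/OUT_RING, and kick the buffer with FIRE_RING. A minimal skeleton of that pattern; NvSubExample and method 0x0100 are placeholders, not a real subchannel or object class:

/* Sketch of the common submission pattern in these examples.
 * NvSubExample and method 0x0100 are placeholders, not real identifiers. */
static int
example_submit(struct nouveau_channel *chan, u32 value)
{
	int ret = RING_SPACE(chan, 2);  /* reserve header + 1 data word */
	if (ret)
		return ret;             /* ring full or channel wedged */

	BEGIN_RING(chan, NvSubExample, 0x0100, 1);
	OUT_RING  (chan, value);
	FIRE_RING (chan);               /* submit the methods to the GPU */
	return 0;
}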
Example #2
static void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
		nouveau_fbcon_gpu_lockup(info);
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_copyarea(info, region);
		return;
	}

	BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
	OUT_RING(chan, (region->sy << 16) | region->sx);
	OUT_RING(chan, (region->dy << 16) | region->dx);
	OUT_RING(chan, (region->height << 16) | region->width);
	FIRE_RING(chan);
}
Example #3
static void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
		nouveau_fbcon_gpu_lockup(info);
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, rect);
		return;
	}

	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
	OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
	BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
	else
		OUT_RING(chan, rect->color);
	BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
	OUT_RING(chan, (rect->dx << 16) | rect->dy);
	OUT_RING(chan, (rect->width << 16) | rect->height);
	FIRE_RING(chan);
}
Example #4
static void
nv50_dac_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	int ret;

	if (!nv_encoder->crtc)
		return;
	nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);

	NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);

	ret = RING_SPACE(evo, 4);
	if (ret) {
		NV_ERROR(dev, "no space while disconnecting DAC\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING  (evo, 0);
	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
	OUT_RING  (evo, 0);

	nv_encoder->crtc = NULL;
}
Example #5
static void
nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int ret;

	NV_DEBUG(drm, "\n");

	if (update && nv_crtc->cursor.visible)
		return;

	ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
	if (ret) {
		NV_ERROR(drm, "no space while unhiding cursor\n");
		return;
	}

	if (nv_device(drm->device)->chipset != 0x50) {
		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NvEvoVRAM);
	}
	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
	OUT_RING(evo, nv_crtc->cursor.offset >> 8);

	if (update) {
		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = true;
	}
}
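The RING_SPACE sizing in nv50_cursor_show is just a word count for what follows: 3 words for the CURSOR_CTRL method (header plus two data words), 2 more for CURSOR_DMA on chipsets other than 0x50, and 2 more for the UPDATE pair when an update is requested. A trivial restatement of that count, for illustration only:

#include <stdbool.h>

/* Sketch: words reserved by nv50_cursor_show above; mirrors the
 * (chipset != 0x50 ? 5 : 3) + update * 2 expression. Illustrative only. */
static int cursor_show_ring_words(int chipset, bool update)
{
	int words = 3;                  /* CURSOR_CTRL header + 2 data */
	if (chipset != 0x50)
		words += 2;             /* CURSOR_DMA header + handle */
	if (update)
		words += 2;             /* UPDATE header + data */
	return words;
}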
Example #6
static void
nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct drm_device *dev = nv_crtc->base.dev;
	int ret;

	NV_DEBUG_KMS(dev, "\n");

	if (update && !nv_crtc->cursor.visible)
		return;

	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
	if (ret) {
		NV_ERROR(dev, "no space while hiding cursor\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
	OUT_RING(evo, 0);
	if (dev_priv->chipset != 0x50) {
		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
	}

	if (update) {
		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = false;
	}
}
Example #7
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
    u32 src_offset = old_mem->start << PAGE_SHIFT;
    u32 dst_offset = new_mem->start << PAGE_SHIFT;
    u32 page_count = new_mem->num_pages;
    int ret;

    ret = RING_SPACE(chan, 3);
    if (ret)
        return ret;

    BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
    OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
    OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

    page_count = new_mem->num_pages;
    while (page_count) {
        int line_count = (page_count > 2047) ? 2047 : page_count;

        ret = RING_SPACE(chan, 11);
        if (ret)
            return ret;

        BEGIN_RING(chan, NvSubM2MF,
                   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
        OUT_RING  (chan, src_offset);
        OUT_RING  (chan, dst_offset);
        OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
        OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
        OUT_RING  (chan, PAGE_SIZE); /* line_length */
        OUT_RING  (chan, line_count);
        OUT_RING  (chan, 0x00000101);
        OUT_RING  (chan, 0x00000000);
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
        OUT_RING  (chan, 0);

        page_count -= line_count;
        src_offset += (PAGE_SIZE * line_count);
        dst_offset += (PAGE_SIZE * line_count);
    }

    return 0;
}
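Each pass of the loop above moves at most 2047 lines of PAGE_SIZE bytes (just under 8 MiB with 4 KiB pages), and the RING_SPACE(chan, 11) reservation matches the eleven words emitted per pass: an 8-method burst with its header (9 words) plus the 2-word NOP. A standalone sanity check of the chunking arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u   /* assumed 4 KiB pages */

/* Reproduces the chunking loop from nv04_bo_move_m2mf above: how many
 * hardware passes does a copy of page_count pages need? */
static unsigned m2mf_passes(unsigned page_count)
{
	unsigned passes = 0;
	while (page_count) {
		unsigned line_count = page_count > 2047 ? 2047 : page_count;
		page_count -= line_count;
		passes++;
	}
	return passes;
}

int main(void)
{
	/* a 16 MiB buffer is 4096 pages: 2047 + 2047 + 2 lines = 3 passes */
	printf("%u\n", m2mf_passes(16u * 1024 * 1024 / SKETCH_PAGE_SIZE));
	return 0;
}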
Example #8
static int
nouveau_fbcon_sync(struct fb_info *info)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	int ret, i;

	if (!chan || !chan->accel_done || in_interrupt() ||
	    info->state != FBINFO_STATE_RUNNING ||
	    info->flags & FBINFO_HWACCEL_DISABLED)
		return 0;

	if (!mutex_trylock(&chan->mutex))
		return 0;

	ret = RING_SPACE(chan, 4);
	if (ret) {
		mutex_unlock(&chan->mutex);
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	if (dev_priv->card_type >= NV_C0) {
		BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
		OUT_RING  (chan, 0);
		BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
		OUT_RING  (chan, 0);
	} else {
		BEGIN_RING(chan, 0, 0x0104, 1);
		OUT_RING  (chan, 0);
		BEGIN_RING(chan, 0, 0x0100, 1);
		OUT_RING  (chan, 0);
	}

	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
	FIRE_RING(chan);
	mutex_unlock(&chan->mutex);

	ret = -EBUSY;
	for (i = 0; i < 100000; i++) {
		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
			ret = 0;
			break;
		}
		DRM_UDELAY(1);
	}

	if (ret) {
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	chan->accel_done = false;
	return 0;
}
Example #9
static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
	uint32_t mode_ctl = 0;
	int ret;

	NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);

	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);

	switch (nv_encoder->dcb->type) {
	case OUTPUT_TMDS:
		if (nv_encoder->dcb->sorconf.link & 1) {
			if (adjusted_mode->clock < 165000)
				mode_ctl = 0x0100;
			else
				mode_ctl = 0x0500;
		} else
			mode_ctl = 0x0200;
		break;
	case OUTPUT_DP:
		mode_ctl |= 0x00050000;
		if (nv_encoder->dcb->sorconf.link & 1)
			mode_ctl |= 0x00000800;
		else
			mode_ctl |= 0x00000900;
		break;
	default:
		break;
	}

	if (crtc->index == 1)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
	else
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;

	ret = RING_SPACE(evo, 2);
	if (ret) {
		NV_ERROR(dev, "no space while connecting SOR\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING(evo, mode_ctl);
}
Example #10
int
nv10_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
		OUT_RING  (chan, fence->base.seqno);
		FIRE_RING (chan);
	}
	return ret;
}
Example #11
static int
nv04_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
		OUT_RING  (chan, fence->base.seqno);
		FIRE_RING (chan);
	}
	return ret;
}
Example #12
int
nv17_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	struct nouveau_cli *cli = (void *)prev->user.client;
	struct nv10_fence_priv *priv = chan->drm->fence;
	struct nv10_fence_chan *fctx = chan->fence;
	u32 value;
	int ret;

	if (!mutex_trylock(&cli->mutex))
		return -EBUSY;

	spin_lock(&priv->lock);
	value = priv->sequence;
	priv->sequence += 2;
	spin_unlock(&priv->lock);

	ret = RING_SPACE(prev, 5);
	if (!ret) {
		BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING  (prev, fctx->sema.handle);
		OUT_RING  (prev, 0);
		OUT_RING  (prev, value + 0);
		OUT_RING  (prev, value + 1);
		FIRE_RING (prev);
	}

	if (!ret && !(ret = RING_SPACE(chan, 5))) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING  (chan, fctx->sema.handle);
		OUT_RING  (chan, 0);
		OUT_RING  (chan, value + 1);
		OUT_RING  (chan, value + 2);
		FIRE_RING (chan);
	}

	mutex_unlock(&cli->mutex);
	return 0;
}
Example #13
/**
 *  \brief Process successfully sent packets.
 *  \param gmacd Pointer to GMAC Driver instance.
 *  \param queue Index of the TX queue to process.
 */
static void _gmacd_tx_complete_handler(struct _gmacd* gmacd, uint8_t queue)
{
	Gmac* gmac = gmacd->gmac;
	struct _gmacd_queue* q = &gmacd->queues[queue];
	struct _gmac_desc *desc;
	gmacd_callback_t callback;
	uint32_t tsr;

	//printf("<TX>\r\n");

	/* Clear status */
	tsr = gmac_get_tx_status(gmac);
	gmac_clear_tx_status(gmac, tsr);

	while (!RING_EMPTY(q->tx_head, q->tx_tail)) {
		desc = &q->tx_desc[q->tx_tail];

		/* Exit if the frame has not been sent yet:
		 * on TX completion, the GMAC sets the USED bit only in the
		 * very first buffer descriptor of the sent frame, and
		 * updates that descriptor with the status and error bits.
		 * This is the descriptor writeback.
		 */
		if ((desc->status & GMAC_TX_STATUS_USED) == 0)
			break;

		/* Process all buffers of the current transmitted frame */
		while ((desc->status & GMAC_TX_STATUS_LASTBUF) == 0) {
			RING_INC(q->tx_tail, q->tx_size);
			desc = &q->tx_desc[q->tx_tail];
		}

		/* Notify upper layer that a frame has been sent */
		if (q->tx_callbacks) {
			callback = q->tx_callbacks[q->tx_tail];
			if (callback)
				callback(queue, tsr);
		}

		/* Go to next frame */
		RING_INC(q->tx_tail, q->tx_size);
	}

	/* If a wakeup callback has been set, notify upper layer that it can
	   send more packets now */
	if (q->tx_wakeup_callback) {
		if (RING_SPACE(q->tx_head, q->tx_tail, q->tx_size) >=
				q->tx_wakeup_threshold) {
			q->tx_wakeup_callback(queue);
		}
	}
}
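The handler above leans on three ring-index helpers: RING_EMPTY, RING_INC/RING_DEC, and a three-argument RING_SPACE that, despite the name, is unrelated to the nouveau macro used elsewhere on this page. Their definitions are not part of this example; a plausible sketch (assumed, not copied from the vendor headers) for a classic head/tail ring that keeps one slot free to distinguish full from empty:

/* Sketch: plausible definitions of the ring-index helpers used by the
 * GMAC driver above. head == tail means empty; one slot is sacrificed
 * so a full ring never looks empty. */
#define RING_EMPTY(head, tail)       ((head) == (tail))
#define RING_INC(idx, size)          ((idx) = ((idx) + 1) % (size))
#define RING_DEC(idx, size)          ((idx) = ((idx) == 0 ? (size) - 1 : (idx) - 1))
#define RING_SPACE(head, tail, size) \
	(((head) >= (tail)) ? ((size) - ((head) - (tail)) - 1) \
			    : ((tail) - (head) - 1))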
Example #14
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    u64 src_offset = old_mem->start << PAGE_SHIFT;
    u64 dst_offset = new_mem->start << PAGE_SHIFT;
    u32 page_count = new_mem->num_pages;
    int ret;

    if (!nvbo->no_vm) {
        if (old_mem->mem_type == TTM_PL_VRAM)
            src_offset  = nvbo->vma.offset;
        else
            src_offset += dev_priv->gart_info.aper_base;

        if (new_mem->mem_type == TTM_PL_VRAM)
            dst_offset  = nvbo->vma.offset;
        else
            dst_offset += dev_priv->gart_info.aper_base;
    }

    page_count = new_mem->num_pages;
    while (page_count) {
        int line_count = (page_count > 2047) ? 2047 : page_count;

        ret = RING_SPACE(chan, 12);
        if (ret)
            return ret;

        BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
        OUT_RING  (chan, upper_32_bits(dst_offset));
        OUT_RING  (chan, lower_32_bits(dst_offset));
        BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
        OUT_RING  (chan, upper_32_bits(src_offset));
        OUT_RING  (chan, lower_32_bits(src_offset));
        OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
        OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
        OUT_RING  (chan, PAGE_SIZE); /* line_length */
        OUT_RING  (chan, line_count);
        BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
        OUT_RING  (chan, 0x00100110);

        page_count -= line_count;
        src_offset += (PAGE_SIZE * line_count);
        dst_offset += (PAGE_SIZE * line_count);
    }

    return 0;
}
Example #15
static int
nv17_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
	u32 value;
	int ret;

	if (!mutex_trylock(&prev->mutex))
		return -EBUSY;

	spin_lock(&priv->lock);
	value = priv->sequence;
	priv->sequence += 2;
	spin_unlock(&priv->lock);

	ret = RING_SPACE(prev, 5);
	if (!ret) {
		BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING  (prev, NvSema);
		OUT_RING  (prev, 0);
		OUT_RING  (prev, value + 0);
		OUT_RING  (prev, value + 1);
		FIRE_RING (prev);
	}

	if (!ret && !(ret = RING_SPACE(chan, 5))) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING  (chan, NvSema);
		OUT_RING  (chan, 0);
		OUT_RING  (chan, value + 1);
		OUT_RING  (chan, value + 2);
		FIRE_RING (chan);
	}

	mutex_unlock(&prev->mutex);
	return 0;
}
Example #16
static int
nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
	int ret = RING_SPACE(chan, 5);
	if (ret == 0) {
		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(virtual));
		OUT_RING  (chan, lower_32_bits(virtual));
		OUT_RING  (chan, sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
				 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
		FIRE_RING (chan);
	}
	return ret;
}
Example #17
static int
nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
		OUT_RING  (chan, upper_32_bits(virtual));
		OUT_RING  (chan, lower_32_bits(virtual));
		OUT_RING  (chan, sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
		OUT_RING  (chan, 0x00000000);
		FIRE_RING (chan);
	}
	return ret;
}
Example #18
static int
nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING  (chan, chan->vram);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(virtual));
		OUT_RING  (chan, lower_32_bits(virtual));
		OUT_RING  (chan, sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
		FIRE_RING (chan);
	}
	return ret;
}
Example #19
static int
nv84_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING  (chan, NvSema);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(chan->id * 16));
		OUT_RING  (chan, lower_32_bits(chan->id * 16));
		OUT_RING  (chan, fence->sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
		FIRE_RING (chan);
	}
	return ret;
}
Example #20
static int
nv84_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING  (chan, NvSema);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(prev->id * 16));
		OUT_RING  (chan, lower_32_bits(prev->id * 16));
		OUT_RING  (chan, fence->sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
		FIRE_RING (chan);
	}
	return ret;
}
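Together, nv84_fence_emit and nv84_fence_sync implement cross-channel ordering: each channel writes its fence sequence (SEMAPHORE_TRIGGER_WRITE_LONG) into its own 16-byte slot at chan->id * 16 inside the shared NvSema buffer, and a waiting channel acquires on the emitting channel's slot (SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL) at prev->id * 16. A CPU-side model of that handshake, purely illustrative and not driver code:

#include <stdint.h>
#include <stdbool.h>

/* Sketch: sema[] stands in for the shared NvSema buffer, one slot per
 * channel id (the hardware slots are 16 bytes; modelled as an index). */
static uint32_t sema[64];

static void emit_model(int chid, uint32_t seq)
{
	sema[chid] = seq;               /* models SEMAPHORE_TRIGGER_WRITE_LONG */
}

static bool sync_model(int prev_chid, uint32_t seq)
{
	/* models SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL: proceed once the
	 * emitting channel has written a sequence >= the awaited one */
	return sema[prev_chid] >= seq;
}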
Example #21
static void
nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
	uint32_t mode_ctl = 0, mode_ctl2 = 0;
	int ret;

	NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n",
		     nv_encoder->or, nv_encoder->dcb->type, crtc->index);

	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);

	if (crtc->index == 1)
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
	else
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;

	/* Lacking a working tv-out, this is not 100% certain. */
	if (nv_encoder->dcb->type == OUTPUT_ANALOG)
		mode_ctl |= 0x40;
	else
	if (nv_encoder->dcb->type == OUTPUT_TV)
		mode_ctl |= 0x100;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;

	ret = RING_SPACE(evo, 3);
	if (ret) {
		NV_ERROR(dev, "no space while connecting DAC\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
	OUT_RING(evo, mode_ctl);
	OUT_RING(evo, mode_ctl2);

	nv_encoder->crtc = encoder->crtc;
}
Example #22
static int
nouveau_fbcon_sync(struct fb_info *info)
{
#if 0
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	int ret, i;

	if (!chan || !chan->accel_done ||
	    info->state != FBINFO_STATE_RUNNING ||
	    info->flags & FBINFO_HWACCEL_DISABLED)
		return 0;

	if (RING_SPACE(chan, 4)) {
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	BEGIN_RING(chan, 0, 0x0104, 1);
	OUT_RING(chan, 0);
	BEGIN_RING(chan, 0, 0x0100, 1);
	OUT_RING(chan, 0);
	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
	FIRE_RING(chan);

	ret = -EBUSY;
	for (i = 0; i < 100000; i++) {
		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
			ret = 0;
			break;
		}
		DRM_UDELAY(1);
	}

	if (ret) {
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	chan->accel_done = false;
#endif
	return 0;
}
Example #23
static void
nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
{
	struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *evo = dev_priv->evo;
	int ret;

	NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);

	ret = RING_SPACE(evo, 2);
	if (ret) {
		NV_ERROR(dev, "no space while disconnecting SOR\n");
		return;
	}
	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING(evo, 0);
}
Example #24
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_object *object;
	u32 arg0, arg1;
	int ret;

	if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/)
		return;

	/* initialise synchronisation routines */
	if      (device->card_type < NV_10) ret = nv04_fence_create(drm);
	else if (device->card_type < NV_11 ||
		 device->chipset   <  0x17) ret = nv10_fence_create(drm);
	else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
	else if (device->chipset   <  0x84) ret = nv50_fence_create(drm);
	else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
	else                                ret = nvc0_fence_create(drm);
	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->card_type >= NV_E0) {
		ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
					  NVDRM_CHAN + 1,
					  NVE0_CHANNEL_IND_ENGINE_CE0 |
					  NVE0_CHANNEL_IND_ENGINE_CE1, 0,
					  &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
		arg1 = 1;
	} else
	if (device->chipset >= 0xa3 &&
	    device->chipset != 0xaa &&
	    device->chipset != 0xac) {
		ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
					  NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
					  &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
				  arg0, arg1, &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW,
				 nouveau_abi16_swclass(drm), NULL, 0, &object);
	if (ret == 0) {
		struct nouveau_software_chan *swch = (void *)object->parent;
		ret = RING_SPACE(drm->channel, 2);
		if (ret == 0) {
			if (device->card_type < NV_C0) {
				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
				OUT_RING  (drm->channel, NVDRM_NVSW);
			} else
			if (device->card_type < NV_E0) {
				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
				OUT_RING  (drm->channel, 0x001f0000);
			}
		}
		swch->flip = nouveau_flip_complete;
		swch->flip_data = drm->channel;
	}

	if (ret) {
		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->card_type < NV_C0) {
		ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
					&drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}

		ret = nouveau_object_new(nv_object(drm),
					 drm->channel->handle, NvNotify0,
					 0x003d, &(struct nv_dma_class) {
						.flags = NV_DMA_TARGET_VRAM |
							 NV_DMA_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
						}, sizeof(struct nv_dma_class),
Example #25
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nvif_device *device = chan->device;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nvkm_mmu *mmu = nvxx_mmu(device);
	struct nv_dma_v0 args = {};
	int ret, i;

	nvif_object_map(&chan->user);

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->mmu->limit - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}

		ret = nvif_object_init(&chan->user, vram, NV_DMA_IN_MEMORY,
				       &args, sizeof(args), &chan->vram);
		if (ret)
			return ret;

		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->mmu->limit - 1;
		} else
		if (chan->drm->agp.bridge) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = mmu->limit - 1;
		}

		ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
				       &args, sizeof(args), &chan->gart);
		if (ret)
			return ret;
	}

	/* initialise dma tracking parameters */
	switch (chan->user.oclass & 0x00ff) {
	case 0x006b:
	case 0x006e:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
	default:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base =  0x10000 / 4;
		chan->dma.ib_max  = (0x02000 / 8) - 1;
		chan->dma.ib_put  = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0x00000000);

	/* allocate software object class (used for fences on <= nv05) */
	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		ret = nvif_object_init(&chan->user, 0x006e,
				       NVIF_CLASS_SW_NV04,
				       NULL, 0, &chan->nvsw);
		if (ret)
			return ret;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
		OUT_RING  (chan, chan->nvsw.handle);
		FIRE_RING (chan);
	}

	/* initialise synchronisation */
	return nouveau_fence(chan->drm)->context_new(chan);
}
Example #26
/**
 * \brief Send a frame split across one or more buffers. If the frame size is
 * larger than the transfer buffer size, an error is returned. To monitor the
 * frame transfer status, specify a callback for the frame.
 * \param gmacd Pointer to GMAC Driver instance.
 * \param queue Index of the TX queue to use.
 * \param sgl Pointer to a scatter-gather list describing the buffers of the ethernet frame.
 * \param callback Pointer to a callback function invoked once the frame has been sent.
 */
uint8_t gmacd_send_sg(struct _gmacd* gmacd, uint8_t queue,
		const struct _gmac_sg_list* sgl, gmacd_callback_t callback)
{
	Gmac* gmac = gmacd->gmac;
	struct _gmacd_queue* q = &gmacd->queues[queue];
	struct _gmac_desc* desc;
	uint16_t idx, tx_head;
	int i;

	if (callback && !q->tx_callbacks) {
		trace_error("Cannot set send callback, no tx_callbacks "
				"buffer configured for queue %u", queue);
	}

	/* Check parameter */
	if (!sgl->size) {
		trace_error("gmacd_send_sg: ethernet frame is empty.\r\n");
		return GMACD_PARAM;
	}
	if (sgl->size >= q->tx_size) {
		trace_error("gmacd_send_sg: ethernet frame has too many buffers.\r\n");
		return GMACD_PARAM;
	}

	/* Check available space */
	if (RING_SPACE(q->tx_head, q->tx_tail, q->tx_size) < sgl->size) {
		trace_error("gmacd_send_sg: not enough free buffers in TX queue.\r\n");
		return GMACD_TX_BUSY;
	}

	/* Tag end of TX queue */
	tx_head = fixed_mod(q->tx_head + sgl->size, q->tx_size);
	idx = tx_head;
	if (q->tx_callbacks)
		q->tx_callbacks[idx] = NULL;
	desc = &q->tx_desc[idx];
	desc->status |= GMAC_TX_STATUS_USED;

	/* Update buffer descriptors in reverse order to avoid a race
	 * condition with hardware.
	 */
	for (i = sgl->size - 1; i >= 0; i--) {
		const struct _gmac_sg *sg = &sgl->entries[i];
		uint32_t status;

		if (sg->size > GMAC_TX_UNITSIZE) {
			trace_error("gmacd_send_sg: buffer size is too big.\r\n");
			return GMACD_PARAM;
		}

		RING_DEC(idx, q->tx_size);

		/* Reset TX callback */
		if (q->tx_callbacks)
			q->tx_callbacks[idx] = NULL;

		desc = &q->tx_desc[idx];

		/* Copy data into the transmission buffer */
		if (sg->buffer && sg->size) {
			memcpy((void*)desc->addr, sg->buffer, sg->size);
			l2cc_clean_region(desc->addr, desc->addr + sg->size);
		}

		/* Compute buffer descriptor status word */
		status = sg->size & GMAC_RX_STATUS_LENGTH_MASK;
		if (i == (sgl->size - 1)) {
			status |= GMAC_TX_STATUS_LASTBUF;
			if (q->tx_callbacks)
				q->tx_callbacks[idx] = callback;
		}
		if (idx == (q->tx_size - 1)) {
			status |= GMAC_TX_STATUS_WRAP;
		}

		/* Update buffer descriptor status word: clear USED bit */
		desc->status = status;
		DSB();
	}

	/* Update TX ring buffer pointers */
	q->tx_head = tx_head;

	/* Now start to transmit if it is not already done */
	gmac_start_transmission(gmac);

	return GMACD_OK;
}
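A minimal caller for gmacd_send_sg might look as follows. This is a sketch: my_gmacd, frame, and frame_len are assumed to exist, and _gmac_sg_list is assumed to hold a pointer to an array of _gmac_sg entries, consistent with how gmacd_send_sg indexes sgl->entries[i] above.

/* Sketch: sending a single-buffer ethernet frame through gmacd_send_sg.
 * The driver instance, frame buffer, and length come from elsewhere;
 * the return value is one of the GMACD_* codes the function uses. */
static uint8_t send_one_frame(struct _gmacd *my_gmacd,
			      uint8_t *frame, uint32_t frame_len)
{
	struct _gmac_sg sg = {
		.buffer = frame,
		.size = frame_len,
	};
	struct _gmac_sg_list sgl = {
		.entries = &sg,         /* assumed pointer-to-array member */
		.size = 1,              /* one buffer in the list */
	};

	return gmacd_send_sg(my_gmacd, 0 /* queue */, &sgl, NULL);
}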
Example #27
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
    struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
    struct nouveau_bo *nvbo = nouveau_bo(bo);
    u64 length = (new_mem->num_pages << PAGE_SHIFT);
    u64 src_offset, dst_offset;
    int ret;

    src_offset = old_mem->start << PAGE_SHIFT;
    dst_offset = new_mem->start << PAGE_SHIFT;
    if (!nvbo->no_vm) {
        if (old_mem->mem_type == TTM_PL_VRAM)
            src_offset  = nvbo->vma.offset;
        else
            src_offset += dev_priv->gart_info.aper_base;

        if (new_mem->mem_type == TTM_PL_VRAM)
            dst_offset  = nvbo->vma.offset;
        else
            dst_offset += dev_priv->gart_info.aper_base;
    }

    ret = RING_SPACE(chan, 3);
    if (ret)
        return ret;

    BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
    OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
    OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

    while (length) {
        u32 amount, stride, height;

        amount  = min(length, (u64)(4 * 1024 * 1024));
        stride  = 16 * 4;
        height  = amount / stride;

        if (new_mem->mem_type == TTM_PL_VRAM &&
                nouveau_bo_tile_layout(nvbo)) {
            ret = RING_SPACE(chan, 8);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, stride);
            OUT_RING  (chan, height);
            OUT_RING  (chan, 1);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
        } else {
            ret = RING_SPACE(chan, 2);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
            OUT_RING  (chan, 1);
        }
        if (old_mem->mem_type == TTM_PL_VRAM &&
                nouveau_bo_tile_layout(nvbo)) {
            ret = RING_SPACE(chan, 8);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, stride);
            OUT_RING  (chan, height);
            OUT_RING  (chan, 1);
            OUT_RING  (chan, 0);
            OUT_RING  (chan, 0);
        } else {
            ret = RING_SPACE(chan, 2);
            if (ret)
                return ret;

            BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
            OUT_RING  (chan, 1);
        }

        ret = RING_SPACE(chan, 14);
        if (ret)
            return ret;

        BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
        OUT_RING  (chan, upper_32_bits(src_offset));
        OUT_RING  (chan, upper_32_bits(dst_offset));
        BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
        OUT_RING  (chan, lower_32_bits(src_offset));
        OUT_RING  (chan, lower_32_bits(dst_offset));
        OUT_RING  (chan, stride);
        OUT_RING  (chan, stride);
        OUT_RING  (chan, stride);
        OUT_RING  (chan, height);
        OUT_RING  (chan, 0x00000101);
        OUT_RING  (chan, 0x00000000);
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
        OUT_RING  (chan, 0);

        length -= amount;
        src_offset += amount;
        dst_offset += amount;
    }

    return 0;
}
Example #28
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->device;
	struct nvif_sclass *sclass;
	u32 arg0, arg1;
	int ret, i, n;

	if (nouveau_noaccel)
		return;

	/* initialise synchronisation routines */
	/*XXX: this is crap, but the fence/channel stuff is a little
	 *     backwards in some places.  this will be fixed.
	 */
	ret = n = nvif_object_sclass_get(&device->object, &sclass);
	if (ret < 0)
		return;

	for (ret = -ENOSYS, i = 0; i < n; i++) {
		switch (sclass[i].oclass) {
		case NV03_CHANNEL_DMA:
			ret = nv04_fence_create(drm);
			break;
		case NV10_CHANNEL_DMA:
			ret = nv10_fence_create(drm);
			break;
		case NV17_CHANNEL_DMA:
		case NV40_CHANNEL_DMA:
			ret = nv17_fence_create(drm);
			break;
		case NV50_CHANNEL_GPFIFO:
			ret = nv50_fence_create(drm);
			break;
		case G82_CHANNEL_GPFIFO:
			ret = nv84_fence_create(drm);
			break;
		case FERMI_CHANNEL_GPFIFO:
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			ret = nvc0_fence_create(drm);
			break;
		default:
			break;
		}
	}

	nvif_object_sclass_put(&sclass);
	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0|
					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1,
					  0, &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
		arg1 = 1;
	} else
	if (device->info.chipset >= 0xa3 &&
	    device->info.chipset != 0xaa &&
	    device->info.chipset != 0xac) {
		ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
					  NvDmaFB, NvDmaTT, &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN, arg0, arg1,
				 &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
			       nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
	if (ret == 0) {
		ret = RING_SPACE(drm->channel, 2);
		if (ret == 0) {
			if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
				OUT_RING  (drm->channel, NVDRM_NVSW);
			} else
			if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) {
				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
				OUT_RING  (drm->channel, 0x001f0000);
			}
		}

		ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
				       false, NVSW_NTFY_UEVENT, NULL, 0, 0,
				       &drm->flip);
		if (ret == 0)
			ret = nvif_notify_get(&drm->flip);
		if (ret) {
			nouveau_accel_fini(drm);
			return;
		}
	}

	if (ret) {
		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
				      NULL, &drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}

		ret = nvif_object_init(&drm->channel->user, NvNotify0,
				       NV_DMA_IN_MEMORY,
				       &(struct nv_dma_v0) {
						.target = NV_DMA_V0_TARGET_VRAM,
						.access = NV_DMA_V0_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
				       }, sizeof(struct nv_dma_v0),
Example #29
static void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	uint32_t fg;
	uint32_t bg;
	uint32_t dsize;
	uint32_t width;
	uint32_t *data = (uint32_t *)image->data;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (image->depth != 1) {
		cfb_imageblit(info, image);
		return;
	}

	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
		nouveau_fbcon_gpu_lockup(info);
	}

	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_imageblit(info, image);
		return;
	}

	width = (image->width + 31) & ~31;
	dsize = (width * image->height) >> 5;

	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
		fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
		bg = ((uint32_t *) info->pseudo_palette)[image->bg_color];
	} else {
		fg = image->fg_color;
		bg = image->bg_color;
	}

	BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7);
	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
	OUT_RING(chan, ((image->dy + image->height) << 16) |
			 ((image->dx + image->width) & 0xffff));
	OUT_RING(chan, bg);
	OUT_RING(chan, fg);
	OUT_RING(chan, (image->height << 16) | image->width);
	OUT_RING(chan, (image->height << 16) | width);
	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));

	while (dsize) {
		int iter_len = dsize > 128 ? 128 : dsize;

		if (RING_SPACE(chan, iter_len + 1)) {
			nouveau_fbcon_gpu_lockup(info);
			cfb_imageblit(info, image);
			return;
		}

		BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
		OUT_RINGp(chan, data, iter_len);
		data += iter_len;
		dsize -= iter_len;
	}

	FIRE_RING(chan);
}
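In nv04_fbcon_imageblit, dsize counts 32-bit words of 1bpp glyph data: the image width is padded up to a multiple of 32 pixels, and every 32 pixels pack into one word, hence the >> 5. The data is then streamed 128 words at a time, matching the RING_SPACE(chan, iter_len + 1) reservation (payload plus one method header). A standalone restatement of the word count:

#include <stdint.h>

/* Sketch: number of 32-bit data words nv04_fbcon_imageblit above pushes
 * for a 1bpp image; mirrors the width/dsize computation in the function. */
static uint32_t imageblit_words(uint32_t width, uint32_t height)
{
	uint32_t padded = (width + 31) & ~31u;  /* pad to 32-pixel multiple */
	return (padded * height) >> 5;          /* 32 pixels per word */
}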