Example #1
0
static void imapx200_lli_to_regs(struct imapx200_dma_chan *chan,
                                 struct dw_lli *lli)
{
    pr_debug("%s: LLI %p => regs\n", __func__, lli);

    chan_writel(chan, SAR, lli->sar);
    chan_writel(chan, DAR, lli->dar);
    chan_writel(chan, LLP, lli->llp);
    chan_writel(chan, CTL_LO, lli->ctllo);
    chan_writel(chan, CTL_HI, lli->ctlhi);
}
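The chan_writel()/chan_readl() accessors used throughout these examples are not shown in the source. A minimal sketch of how such per-channel accessors are often built, assuming each channel carries a base pointer to its register block; the regs field, the DW_CH_ offset prefix and the token-pasting form are assumptions, not taken from the source:

#include <linux/io.h>

/*
 * Assumed accessors, for illustration only: SAR, DAR, LLP, CTL_LO, ...
 * are expected to expand to byte offsets via the DW_CH_ prefix, and
 * chan->regs is assumed to be the ioremap'ed channel base.  The HSU
 * examples below likely use the same pattern with plain numeric
 * offsets (HSU_CH_CR, HSU_CH_SR, ...) instead of token pasting.
 */
#define chan_writel(chan, reg, val) \
	writel((val), (chan)->regs + DW_CH_##reg)
#define chan_readl(chan, reg) \
	readl((chan)->regs + DW_CH_##reg)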
Example #2
0
static inline void dma_chan_irq(struct hsu_dma_chan *chan)
{
	struct uart_hsu_port *up = chan->uport;
	unsigned long flags;
	u32 int_sts;

	spin_lock_irqsave(&up->port.lock, flags);

	if (!up->use_dma || !up->running)
		goto exit;

	/*
	 * Regardless of the situation, we need to read-clear the IRQ
	 * status: there is a bug, see Errata 5, HSD 2900918.
	 */
	int_sts = chan_readl(chan, HSU_CH_SR);

	/* Rx channel */
	if (chan->dirt == DMA_FROM_DEVICE)
		hsu_dma_rx(up, int_sts);

	/* Tx channel */
	if (chan->dirt == DMA_TO_DEVICE) {
		chan_writel(chan, HSU_CH_CR, 0x0);
		up->dma_tx_on = 0;
		hsu_dma_tx(up);
	}

exit:
	spin_unlock_irqrestore(&up->port.lock, flags);
	return;
}
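dma_chan_irq() above handles a single channel; the source does not show the top-level handler that dispatches it. A minimal sketch of a hypothetical dispatcher, assuming a controller structure with a channel array; the struct hsu_dma layout, nr_channels and chans[] are assumptions, not taken from the source:

/*
 * Hypothetical container and dispatcher, for illustration only.
 */
struct hsu_dma {
	unsigned int		nr_channels;
	struct hsu_dma_chan	*chans;
};

static irqreturn_t hsu_dma_irq(int irq, void *dev_id)
{
	struct hsu_dma *hsu = dev_id;
	unsigned int i;

	/*
	 * dma_chan_irq() read-clears the per-channel status register
	 * (HSU_CH_SR) itself, so simply walk every channel.
	 */
	for (i = 0; i < hsu->nr_channels; i++)
		dma_chan_irq(&hsu->chans[i]);

	return IRQ_HANDLED;
}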
Example #3
0
static int imapx200_dma_stop(struct imapx200_dma_chan *chan)
{
    struct imapx200_dmac *dmac = chan->dmac;
    u32 config;
    int timeout;

    pr_debug("%s: stopping channel\n", __func__);

    config = chan_readl(chan, CFG_LO);
    config |= DWC_CFGL_CH_SUSP;
    chan_writel(chan, CFG_LO, config);

    timeout = 1000;
    do {
        config = chan_readl(chan, CFG_LO);
        pr_debug("%s: %d - config %08x\n", __func__, timeout, config);
        if (config & DWC_CFGL_CH_SUSP)
            udelay(10);
        else
            break;
    } while (--timeout > 0);

    if (config & DWC_CFGL_CH_SUSP) {
        printk(KERN_ERR "%s: channel still active\n", __func__);
        return -EFAULT;
    }

    dma_clear_bit(dmac, CH_EN, chan->bit);
    return 0;
}
Example #4
0
void hsu_dma_tx(struct uart_hsu_port *up)
{
	struct circ_buf *xmit = &up->port.state->xmit;
	struct hsu_dma_buffer *dbuf = &up->txbuf;
	int count;

	/* test_and_set_bit() might be better, but this is always called with the port lock held */
	if (up->dma_tx_on)
		return;

	/* Update the circ buf info: advance past the bytes sent by the previous DMA transfer */
	xmit->tail += dbuf->ofs;
	xmit->tail &= UART_XMIT_SIZE - 1;

	up->port.icount.tx += dbuf->ofs;
	dbuf->ofs = 0;

	/* Disable the channel */
	chan_writel(up->txc, HSU_CH_CR, 0x0);

	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&up->port)) {
		dma_sync_single_for_device(up->port.dev,
					   dbuf->dma_addr,
					   dbuf->dma_size,
					   DMA_TO_DEVICE);

		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
		dbuf->ofs = count;

		/* Reprogram the channel */
		chan_writel(up->txc, HSU_CH_D0SAR, dbuf->dma_addr + xmit->tail);
		chan_writel(up->txc, HSU_CH_D0TSR, count);

		/* Reenable the channel */
		chan_writel(up->txc, HSU_CH_DCR, 0x1
						 | (0x1 << 8)
						 | (0x1 << 16)
						 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
						 );
		up->dma_tx_on = 1;
		chan_writel(up->txc, HSU_CH_CR, 0x1);
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&up->port);
}
Example #5
0
/* This is always called with the spinlock held, so
 * modifying the timeout timer here is safe */
void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
{
	struct hsu_dma_buffer *dbuf = &up->rxbuf;
	struct hsu_dma_chan *chan = up->rxc;
	struct uart_port *port = &up->port;
	struct tty_struct *tty = port->state->port.tty;
	int count;

	if (!tty)
		return;

	/*
	 * First we need to know how much has already been transferred;
	 * then check whether this is a timeout DMA irq, take the
	 * trailing bytes out, push them up and reenable the channel.
	 */

	/* Timeout IRQ: we need to wait a little while, see Errata 2 */
	if (int_sts & 0xf00)
		udelay(2);

	/* Stop the channel */
	chan_writel(chan, HSU_CH_CR, 0x0);

	count = chan_readl(chan, HSU_CH_D0SAR) - dbuf->dma_addr;
	if (!count) {
		/* Restart the channel before we leave */
		chan_writel(chan, HSU_CH_CR, 0x3);
		return;
	}

	dma_sync_single_for_cpu(port->dev, dbuf->dma_addr,
			dbuf->dma_size, DMA_FROM_DEVICE);

	/*
	 * Head will only wrap around when we recycle
	 * the DMA buffer, and when that happens, we
	 * explicitly set tail to 0. So head will
	 * always be greater than tail.
	 */
	tty_insert_flip_string(tty, dbuf->buf, count);
	port->icount.rx += count;

	dma_sync_single_for_device(up->port.dev, dbuf->dma_addr,
			dbuf->dma_size, DMA_FROM_DEVICE);

	/* Reprogram the channel */
	chan_writel(chan, HSU_CH_D0SAR, dbuf->dma_addr);
	chan_writel(chan, HSU_CH_D0TSR, dbuf->dma_size);
	chan_writel(chan, HSU_CH_DCR, 0x1
					 | (0x1 << 8)
					 | (0x1 << 16)
					 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
					 );
	tty_flip_buffer_push(tty);

	chan_writel(chan, HSU_CH_CR, 0x3);
}
Example #6
0
static void serial_hsu_stop_tx(struct uart_port *port)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);
	struct hsu_dma_chan *txc = up->txc;

	if (up->use_dma)
		chan_writel(txc, HSU_CH_CR, 0x0);
	else if (up->ier & UART_IER_THRI) {
		up->ier &= ~UART_IER_THRI;
		serial_out(up, UART_IER, up->ier);
	}
}
Example #7
0
static void serial_hsu_stop_rx(struct uart_port *port)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);
	struct hsu_dma_chan *chan = up->rxc;

	if (up->use_dma)
		chan_writel(chan, HSU_CH_CR, 0x2);
	else {
		up->ier &= ~UART_IER_RLSI;
		up->port.read_status_mask &= ~UART_LSR_DR;
		serial_out(up, UART_IER, up->ier);
	}
}
Example #8
0
/* The buffer is already cache coherent */
void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf)
{
	dbuf->ofs = 0;

	chan_writel(rxc, HSU_CH_BSR, 32);
	chan_writel(rxc, HSU_CH_MOTSR, 4);

	chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
	chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
	chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
					 | (0x1 << 16)
					 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
					 );
	chan_writel(rxc, HSU_CH_CR, 0x3);
}
Example #9
0
/* The buffer is already cache coherent */
void hsu_dma_start_rx_chan(struct hsu_dma_chan *rxc, struct hsu_dma_buffer *dbuf)
{
	dbuf->ofs = 0;

	chan_writel(rxc, HSU_CH_BSR, 32);
	chan_writel(rxc, HSU_CH_MOTSR, 4);

	chan_writel(rxc, HSU_CH_D0SAR, dbuf->dma_addr);
	chan_writel(rxc, HSU_CH_D0TSR, dbuf->dma_size);
	chan_writel(rxc, HSU_CH_DCR, 0x1 | (0x1 << 8)
					 | (0x1 << 16)
					 | (0x1 << 24)	/* timeout bit, see HSU Errata 1 */
					 );
	chan_writel(rxc, HSU_CH_CR, 0x3);

	mod_timer(&rxc->rx_timer, jiffies + HSU_DMA_TIMEOUT_CHECK_FREQ);
}
Example #10
0
/*
 * What is special here:
 * 1. choose the 64B FIFO mode
 * 2. make sure not to select half-empty mode for the A0 stepping
 * 3. start DMA or PIO depending on the configuration
 * 4. we only allocate DMA memory when needed
 */
static int serial_hsu_startup(struct uart_port *port)
{
	struct uart_hsu_port *up =
		container_of(port, struct uart_hsu_port, port);
	unsigned long flags;

	/*
	 * Clear the FIFO buffers and disable them.
	 * (they will be reenabled in set_termios())
	 */
	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
			UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
	serial_out(up, UART_FCR, 0);

	/* Clear the interrupt registers. */
	(void) serial_in(up, UART_LSR);
	(void) serial_in(up, UART_RX);
	(void) serial_in(up, UART_IIR);
	(void) serial_in(up, UART_MSR);

	/* Now, initialize the UART, default is 8n1 */
	serial_out(up, UART_LCR, UART_LCR_WLEN8);

	spin_lock_irqsave(&up->port.lock, flags);

	up->port.mctrl |= TIOCM_OUT2;
	serial_hsu_set_mctrl(&up->port, up->port.mctrl);

	/*
	 * Finally, enable interrupts.  Note: Modem status interrupts
	 * are set via set_termios(), which will be occurring imminently
	 * anyway, so we don't enable them here.
	 */
	if (!up->use_dma)
		up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE;
	else
		up->ier = 0;
	serial_out(up, UART_IER, up->ier);

	spin_unlock_irqrestore(&up->port.lock, flags);

	/* DMA init */
	if (up->use_dma) {
		struct hsu_dma_buffer *dbuf;
		struct circ_buf *xmit = &port->state->xmit;

		up->dma_tx_on = 0;

		/* First allocate the RX buffer */
		dbuf = &up->rxbuf;
		dbuf->buf = kzalloc(HSU_DMA_BUF_SIZE, GFP_KERNEL);
		if (!dbuf->buf) {
			up->use_dma = 0;
			goto exit;
		}
		dbuf->dma_addr = dma_map_single(port->dev,
						dbuf->buf,
						HSU_DMA_BUF_SIZE,
						DMA_FROM_DEVICE);
		dbuf->dma_size = HSU_DMA_BUF_SIZE;

		/* Start the RX channel right now */
		hsu_dma_start_rx_chan(up->rxc, dbuf);

		/* Next init the TX DMA */
		dbuf = &up->txbuf;
		dbuf->buf = xmit->buf;
		dbuf->dma_addr = dma_map_single(port->dev,
					       dbuf->buf,
					       UART_XMIT_SIZE,
					       DMA_TO_DEVICE);
		dbuf->dma_size = UART_XMIT_SIZE;

		/* This should not be changed all around */
		chan_writel(up->txc, HSU_CH_BSR, 32);
		chan_writel(up->txc, HSU_CH_MOTSR, 4);
		dbuf->ofs = 0;
	}

exit:
	/* And clear the interrupt registers again, for luck. */
	(void) serial_in(up, UART_LSR);
	(void) serial_in(up, UART_RX);
	(void) serial_in(up, UART_IIR);
	(void) serial_in(up, UART_MSR);

	up->running = 1;
	return 0;
}
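serial_hsu_startup() above allocates and maps the DMA buffers, but the matching teardown is not shown in the source. A minimal sketch of what a hypothetical shutdown path would have to undo, mirroring the sizes and directions used in the startup code; the function name is an assumption:

/*
 * Hypothetical teardown, for illustration only: it undoes the
 * allocations and mappings made in serial_hsu_startup().
 */
static void serial_hsu_stop_dma(struct uart_hsu_port *up)
{
	struct uart_port *port = &up->port;

	/* Stop both channels before touching the buffers */
	chan_writel(up->txc, HSU_CH_CR, 0x0);
	chan_writel(up->rxc, HSU_CH_CR, 0x0);

	/* TX: only the mapping was ours, the memory is the circ buffer */
	dma_unmap_single(port->dev, up->txbuf.dma_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);

	/* RX: the buffer was allocated in startup, so unmap and free it */
	dma_unmap_single(port->dev, up->rxbuf.dma_addr,
			 HSU_DMA_BUF_SIZE, DMA_FROM_DEVICE);
	kfree(up->rxbuf.buf);
}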
Example #11
0
int imapx200_dma_devconfig(int channel,
                           enum imapx200_dmafc source,
                           unsigned long devaddr)
{
    struct imapx200_dma_chan *chan = imap_dma_lookup_channel(channel);
    u32 peripheral;
    u32 ctlx = 0;
    u32 intrx = 0;
    u32 cfg_hi = 0;
    u32 cfg_lo = 0;

    pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n",
             __func__, channel, source, devaddr, chan);

    WARN_ON(!chan);
    if (!chan)
        return -EINVAL;

    peripheral = (chan->peripheral & 0x7);
    chan->source = source;
    chan->dev_addr = devaddr;

    pr_debug("%s: peripheral %d\n", __func__, peripheral);
    ctlx = chan_readl(chan, CTL_LO);
    pr_debug("devconfig_1\n");
    ctlx &= ~DWC_CTLL_FC_MASK;
    cfg_hi = chan_readl(chan, CFG_HI);
    pr_debug("CFG_HI is %x\n", cfg_hi);
    cfg_lo = chan_readl(chan, CFG_LO);
    pr_debug("devconfig_3\n");
    switch (source) {
    case IMAPX200_DMA_M2M:
        ctlx |= DWC_CTLL_FC_M2M;
        break;
    case IMAPX200_DMA_M2P:
        ctlx |= DWC_CTLL_FC_M2P;
        cfg_lo &= ~DWC_CFGL_HS_DST;
        cfg_hi |= DWC_CFGH_DST_PER(chan->client->handshake);
        break;
    case IMAPX200_DMA_P2M:
        ctlx |= DWC_CTLL_FC_P2M;
        cfg_lo &= ~DWC_CFGL_HS_SRC;
        cfg_hi |= DWC_CFGH_SRC_PER(chan->client->handshake);
        break;
    default:
        printk(KERN_ERR "%s: bad source\n", __func__);
        return -EINVAL;
    }
    /* set DMA flow control bit */
    chan_writel(chan, CTL_LO, ctlx);
    pr_debug("devconfig_4\n");
    chan_writel(chan, CFG_LO, cfg_lo);
    pr_debug("devconfig_5\n");
    chan_writel(chan, CFG_HI, cfg_hi);
//	cfg_hi = chan_readl(chan,CFG_HI);
//	pr_debug("CFG_HI is %x\n", cfg_hi);
    /* allow TC and ERR interrupts */
    intrx = 1 << chan->number;
    pr_debug("devconfig_6\n");
    dma_set_bit(chan->dmac, MASK.XFER, intrx);
    pr_debug("devconfig_7\n");
    dma_set_bit(chan->dmac, MASK.BLOCK, intrx);
    dma_set_bit(chan->dmac, MASK.ERROR, intrx);

    pr_debug("devconfig_8\n");
    return 0;
}
Example #12
0
int imapx200_dma_enqueue(unsigned int channel, void *id,
                         dma_addr_t data, int size)
{
    struct imapx200_dma_chan *chan = imap_dma_lookup_channel(channel);
    struct imapx200_dma_buff *next;
    struct imapx200_dma_buff *buff;
    struct dw_lli *lli;
    int ret;

    WARN_ON(!chan);
    if (!chan)
        return -EINVAL;

    buff = kzalloc(sizeof(struct imapx200_dma_buff), GFP_KERNEL);
    if (!buff) {
        printk(KERN_ERR "%s: no memory for buffer\n", __func__);
        return -ENOMEM;
    }

    lli = dma_pool_alloc(dma_pool, GFP_KERNEL, &buff->lli_dma);
    if (!lli) {
        printk(KERN_ERR "%s: no memory for lli\n", __func__);
        ret = -ENOMEM;
        goto err_buff;
    }

    pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
             __func__, buff, data, lli, (u32)buff->lli_dma, size);

    buff->lli = lli;
    buff->pw = id;

    imapx200_dma_fill_lli(chan, lli, data, size);

    if ((next = chan->next) != NULL) {
        struct imapx200_dma_buff *end = chan->end;
        struct dw_lli *endlli = end->lli;

        pr_debug("enquing onto channel\n");

        end->next = buff;
        endlli->llp = buff->lli_dma;

        if (chan->flags) {
            struct imapx200_dma_buff *curr = chan->curr;
            lli->llp = curr->lli_dma;
        }

        if (next == chan->curr) {
            chan_writel(chan, LLP, buff->lli_dma);
            chan->next = buff;
        }

        chan->end = buff;
    } else {
        pr_debug("enquing onto empty channel\n");

        chan->curr = buff;
        chan->next = buff;
        chan->end = buff;

        imapx200_lli_to_regs(chan, lli);
    }

    dbg_showchan(chan);
    return 0;

err_buff:
    kfree(buff);

    return ret;
}
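A minimal usage sketch tying Examples #11 and #12 together, assuming the caller has already claimed the channel; DMACH_EXAMPLE and EXAMPLE_DEV_FIFO_ADDR below are placeholders for a real channel id and peripheral FIFO address, not taken from the source:

/*
 * Hypothetical caller, for illustration only: the channel id, device
 * FIFO address and DMA-mapped buffer are placeholders.
 */
static int example_start_tx(dma_addr_t buf, int len)
{
    int ret;

    /* Memory-to-peripheral: set flow control and handshaking (Example #11) */
    ret = imapx200_dma_devconfig(DMACH_EXAMPLE, IMAPX200_DMA_M2P,
                                 EXAMPLE_DEV_FIFO_ADDR);
    if (ret)
        return ret;

    /* Queue one buffer: builds an LLI and loads the channel (Example #12) */
    return imapx200_dma_enqueue(DMACH_EXAMPLE, NULL, buf, len);
}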