Example #1
static int config_dma(void)
{
	u32 i = 0;

	pr_debug("%s\n", __func__);

	/* fill descriptor table */
	for (i = 0; i < 326; i++) {
		/* point to next desc table */
		dma_desc_table[2 * i] =
		    (unsigned long)&dma_desc_table[2 * i + 2];
	}
	/* last descriptor points to first */
	dma_desc_table[2 * 326] = (unsigned long)&dma_desc_table[0];

#ifdef CONFIG_FB_HITACHI_TX09_LANDSCAPE

	dma_desc_table[0 + 1] = (unsigned long)fb_buffer;
	for (i = 0; i < 7; i++) {
		/* blanking lines point to first line of fb_buffer */
		dma_desc_table[2 * i + 1] = (unsigned long)fb_buffer + 319 * 2;
	}
	for (i = 7; i < 327; i++) {
		/* visible lines */
		dma_desc_table[2 * i + 1] =
		    (unsigned long)fb_buffer + (319 - (i - 7)) * 2;
	}

#else				/* portrait mode */

	for (i = 0; i < 7; i++) {
		/* blanking lines point to first line of fb_buffer */
		dma_desc_table[2 * i + 1] = (unsigned long)fb_buffer;
	}
	for (i = 7; i < 327; i++) {
		/* visible lines */
		dma_desc_table[2 * i + 1] =
		    (unsigned long)fb_buffer + 2 * 240 * (i - 7);
	}

#endif

#ifdef CONFIG_FB_HITACHI_TX09_LANDSCAPE
	set_dma_x_count(CH_PPI, 240);
	set_dma_x_modify(CH_PPI, 2 * 320);
	set_dma_y_count(CH_PPI, 0);
	set_dma_y_modify(CH_PPI, 0);
	set_dma_next_desc_addr(CH_PPI, (unsigned long)dma_desc_table[2 * 326]);
#else
	set_dma_x_count(CH_PPI, 240);
	set_dma_x_modify(CH_PPI, 2);
	set_dma_y_count(CH_PPI, 0);
	set_dma_y_modify(CH_PPI, 0);
	set_dma_next_desc_addr(CH_PPI, (unsigned long)dma_desc_table[2 * 326]);
#endif

	set_dma_config(CH_PPI, 0x7404);

	return 0;
}
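
The descriptor table built above is a circular linked list: each even slot holds the address of the next descriptor pair, each odd slot holds the DMA start address for one display line, and the last entry points back to the first. Below is a minimal, self-contained sketch of that pattern; the names (LINES, desc_table, framebuffer) are illustrative stand-ins rather than the driver's API, and the table is simply printed instead of being handed to a DMA engine.

#include <stdio.h>

#define LINES 8                               /* stand-in for the 327 display lines */
static unsigned long desc_table[2 * LINES];   /* {next_desc, start_addr} pairs      */
static unsigned short framebuffer[LINES];     /* one 16-bit word per "line"         */

int main(void)
{
	int i;

	for (i = 0; i < LINES; i++) {
		/* even slot: pointer to the next descriptor pair */
		desc_table[2 * i] = (unsigned long)&desc_table[2 * (i + 1)];
		/* odd slot: DMA start address for this line */
		desc_table[2 * i + 1] = (unsigned long)&framebuffer[i];
	}
	/* close the ring: the last descriptor points back to the first */
	desc_table[2 * (LINES - 1)] = (unsigned long)&desc_table[0];

	for (i = 0; i < LINES; i++)
		printf("desc %d: next=%#lx addr=%#lx\n",
		       i, desc_table[2 * i], desc_table[2 * i + 1]);
	return 0;
}
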
Example #2
static void bfin_sir_dma_tx_chars(struct net_device *dev)
{
	struct bfin_sir_self *self = netdev_priv(dev);
	struct bfin_sir_port *port = self->sir_port;

	if (!port->tx_done)
		return;
	port->tx_done = 0;

	if (self->tx_buff.len == 0) {
		self->stats.tx_packets++;
		if (self->newspeed) {
			bfin_sir_set_speed(port, self->newspeed);
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_enable_rx(port);
		port->tx_done = 1;
		netif_wake_queue(dev);
		return;
	}

	blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data),
		(unsigned long)(self->tx_buff.data+self->tx_buff.len));
	set_dma_config(port->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(port->tx_dma_channel,
		(unsigned long)(self->tx_buff.data));
	set_dma_x_count(port->tx_dma_channel, self->tx_buff.len);
	set_dma_x_modify(port->tx_dma_channel, 1);
	enable_dma(port->tx_dma_channel);
}
Example #3
static int dma_init(void) {
    int ret;

    /* Request DMA channel */
    ret = request_dma(CH_PPI, DRIVER_NAME);
    if(ret < 0) {
        printk(KERN_WARNING DRIVER_NAME ": Could not allocate DMA channel\n");
        return ret;
    }

    /* Disable channel while it is being configured */
    disable_dma(CH_PPI);

    /* Allocate buffer space for the DMA engine to use */
    dma_buffer = __get_dma_pages(GFP_KERNEL, page_alloc_order(BUFFER_SIZE * BUFFER_COUNT));
    if(dma_buffer == 0) {
        printk(KERN_WARNING DRIVER_NAME ": Could not allocate dma_pages\n");
        free_dma(CH_PPI);
        return -ENOMEM;
    }

    /* Invalidate the cache over the DMA buffer */
    invalidate_dcache_range(dma_buffer, dma_buffer + (BUFFER_SIZE * BUFFER_COUNT));

    /* Set DMA configuration */
    set_dma_start_addr(CH_PPI, dma_buffer);
    set_dma_config(CH_PPI, (DMAFLOW_AUTO | WNR | RESTART | DI_EN | WDSIZE_16 | DMA2D | DI_SEL));
    set_dma_x_count(CH_PPI, SAMPLES_PER_BUFFER * CHANNELS);
    set_dma_x_modify(CH_PPI, SAMPLE_SIZE);
    set_dma_y_count(CH_PPI, BUFFER_COUNT);
    set_dma_y_modify(CH_PPI, SAMPLE_SIZE);
    set_dma_callback(CH_PPI, &buffer_full_handler, NULL);

    return 0;
}
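
For symmetry with the setup above, a hypothetical teardown helper might look like the sketch below. It reuses dma_buffer, page_alloc_order(), BUFFER_SIZE and BUFFER_COUNT from the example, assumes dma_buffer is the unsigned long returned by __get_dma_pages(), and is not taken from the original driver.

static void dma_cleanup(void)
{
    /* Quiesce the channel before releasing its resources */
    disable_dma(CH_PPI);

    /* Return the pages obtained from __get_dma_pages() */
    if (dma_buffer)
        free_pages(dma_buffer, page_alloc_order(BUFFER_SIZE * BUFFER_COUNT));

    /* Give the DMA channel back to the system */
    free_dma(CH_PPI);
}
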
Example #4
static int config_dma(void)
{
	u32 i;

	if(landscape) {

		for (i=0;i<U_LINES;i++)
		{
			//blanking lines point to first line of fb_buffer
			dma_desc_table[2*i] = (unsigned long)&dma_desc_table[2*i+2];
			dma_desc_table[2*i+1] = (unsigned long)fb_buffer;
		}

		for (i=U_LINES;i<U_LINES+LCD_Y_RES;i++)
		{
			// visible lines
			dma_desc_table[2*i] = (unsigned long)&dma_desc_table[2*i+2];
			dma_desc_table[2*i+1] = (unsigned long)fb_buffer + (LCD_Y_RES+U_LINES-1-i)*2;
		}

		//last descriptor points to first
		dma_desc_table[2*(LCD_Y_RES+U_LINES-1)] = (unsigned long)&dma_desc_table[0];

		set_dma_x_count(CH_PPI, LCD_X_RES);
		set_dma_x_modify(CH_PPI, LCD_Y_RES*(LCD_BBP/8));
		set_dma_y_count(CH_PPI, 0);
		set_dma_y_modify(CH_PPI, 0);
		set_dma_next_desc_addr(CH_PPI, (void *)dma_desc_table[0]);
		set_dma_config(CH_PPI, DMAFLOW_LARGE | NDSIZE_4 | WDSIZE_16);

	} else {

		set_dma_config(CH_PPI, set_bfin_dma_config(DIR_READ,
				DMA_FLOW_AUTO,
				INTR_DISABLE,
				DIMENSION_2D,
				DATA_SIZE_16,
				DMA_NOSYNC_KEEP_DMA_BUF));
		set_dma_x_count(CH_PPI, LCD_X_RES);
		set_dma_x_modify(CH_PPI,LCD_BBP/8);
		set_dma_y_count(CH_PPI, LCD_Y_RES+U_LINES);
		set_dma_y_modify(CH_PPI, LCD_BBP/8);
		set_dma_start_addr(CH_PPI, ((unsigned long) fb_buffer));
	}

	return 0;
}
Example #5
static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
{
#ifdef CONFIG_SIR_BFIN_DMA
	dma_addr_t dma_handle;
#endif /* CONFIG_SIR_BFIN_DMA */

	if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) {
		dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n");
		return -EBUSY;
	}

	if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) {
		dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n");
		free_dma(port->rx_dma_channel);
		return -EBUSY;
	}

#ifdef CONFIG_SIR_BFIN_DMA

	set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
	set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);

	port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
						  &dma_handle, GFP_DMA);
	port->rx_dma_buf.head = 0;
	port->rx_dma_buf.tail = 0;
	port->rx_dma_nrows = 0;

	set_dma_config(port->rx_dma_channel,
				set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
									INTR_ON_ROW, DIMENSION_2D,
									DATA_SIZE_8, DMA_SYNC_RESTART));
	set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT);
	set_dma_x_modify(port->rx_dma_channel, 1);
	set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT);
	set_dma_y_modify(port->rx_dma_channel, 1);
	set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
	enable_dma(port->rx_dma_channel);

	port->rx_dma_timer.data = (unsigned long)(dev);
	port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;

#else

	if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
		dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
		return -EBUSY;
	}

	if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
		dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
		free_irq(port->irq, dev);
		return -EBUSY;
	}
#endif

	return 0;
}
Example #6
static ssize_t coreb_read(struct file *file, char * buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read = 0;

	if ((p + count) > coreb_size) return -EFAULT;

	while (count > 0)
	{
		int len = count;
		if (len > PAGE_SIZE) len = PAGE_SIZE;

		/* Source Channel */
		set_dma_start_addr(CH_MEM_STREAM2_SRC, coreb_base + p);
		set_dma_x_count(CH_MEM_STREAM2_SRC, len);
		set_dma_x_modify(CH_MEM_STREAM2_SRC, sizeof(char));
		set_dma_config(CH_MEM_STREAM2_SRC, RESTART);
		/* Destination Channel */
		set_dma_start_addr(CH_MEM_STREAM2_DEST, (unsigned long)buf);
		set_dma_x_count(CH_MEM_STREAM2_DEST, len);
		set_dma_x_modify(CH_MEM_STREAM2_DEST, sizeof(char));
		set_dma_config(CH_MEM_STREAM2_DEST, WNR | RESTART | DI_EN);
	
		enable_dma(CH_MEM_STREAM2_SRC);
		enable_dma(CH_MEM_STREAM2_DEST);

		interruptible_sleep_on(&coreb_dma_wait);

		disable_dma(CH_MEM_STREAM2_SRC);
		disable_dma(CH_MEM_STREAM2_DEST);

		count -= len;
		read += len;
		buf += len;
		p += len;
	} 

	return read;
}
Example #7
/*
 * FUNCTION NAME: ppi_read
 *
 * INPUTS/OUTPUTS:
 * in_filp - description of the opened file.
 * in_count - how many bytes the user wants to read.
 * out_buf - data will be written to this address.
 *
 * RETURN
 * positive number: bytes read back
 * -EINVAL when the word size is set to 16 and an odd number of bytes is requested.
 * -EAGAIN when read mode is set to non-blocking and there is no RX data.
 *
 * FUNCTION(S) CALLED:
 *
 * GLOBAL VARIABLES REFERENCED: ppiinfo
 *
 * GLOBAL VARIABLES MODIFIED: NIL
 *
 * DESCRIPTION: Invoked when the user calls the 'read' system call
 *              to read from the device.
 *
 * CAUTION:
 */
static ssize_t ppi_read(struct file *filp, char *buf, size_t count,
			loff_t * pos)
{
	int ierr;
	ppi_device_t *pdev = filp->private_data;

	pr_debug("ppi_read:\n");

	if (count <= 0)
		return 0;

	pdev->done = 0;

	/* Invalidate allocated memory in Data Cache */

	blackfin_dcache_invalidate_range((u_long) buf, (u_long) (buf + count));

	pr_debug("ppi_read: blackfin_dcache_invalidate_range : DONE\n");

	/* configure ppi port for DMA RX */

	set_dma_config(CH_PPI, pdev->dma_config);
	set_dma_start_addr(CH_PPI, (u_long) buf);
	set_dma_x_count(CH_PPI, pdev->pixel_per_line / 2);	// Div 2 because of 16-bit packing
	set_dma_y_count(CH_PPI, pdev->lines_per_frame);
	set_dma_y_modify(CH_PPI, 2);

	if (pdev->bpp > 8 || pdev->dma_config & WDSIZE_16)
		set_dma_x_modify(CH_PPI, 2);
	else
		set_dma_x_modify(CH_PPI, 1);

	pr_debug("ppi_read: SETUP DMA : DONE\n");

	enable_dma(CH_PPI);

	/* Enable PPI */

	bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);
	SSYNC();

	if (pdev->ppi_trigger_gpio > NO_TRIGGER) {
		gpio_set_value(pdev->ppi_trigger_gpio, 1);
		udelay(1);
		gpio_set_value(pdev->ppi_trigger_gpio, 0);
	}

	pr_debug("ppi_read: PPI ENABLED : DONE\n");

	/* Wait for data available */
	if (1) {
		if (pdev->nonblock)
			return -EAGAIN;
		else {
			pr_debug("PPI wait_event_interruptible\n");
			ierr =
			    wait_event_interruptible(*(pdev->rx_avail),
						     pdev->done);
			if (ierr) {
				/* waiting is broken by a signal */
				pr_debug("PPI wait_event_interruptible ierr\n");
				return ierr;
			}
		}
	}

	pr_debug("PPI wait_event_interruptible done\n");

	disable_dma(CH_PPI);

	pr_debug("ppi_read: return\n");

	return count;
}
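
From user space, the blocking read path described in the header comment above could be exercised with something like the following sketch. The device node name "/dev/ppi" and the 320x240, 16-bit frame geometry are assumptions for illustration only, not part of the driver shown here.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const size_t frame_bytes = 320 * 240 * 2;   /* assumed 16-bit pixels */
	unsigned char *frame = malloc(frame_bytes);
	int fd = open("/dev/ppi", O_RDONLY);        /* blocking mode assumed */
	ssize_t n;

	if (fd < 0 || !frame) {
		perror("setup");
		return 1;
	}

	/* Sleeps in the driver until the frame DMA completes */
	n = read(fd, frame, frame_bytes);
	if (n < 0)
		perror("read");
	else
		printf("captured %zd bytes\n", n);

	close(fd);
	free(frame);
	return 0;
}
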
Example #8
static int bfin_serial_startup(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

#ifdef CONFIG_SERIAL_BFIN_DMA
	dma_addr_t dma_handle;

	if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) {
		printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n");
		return -EBUSY;
	}

	if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) {
		printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n");
		free_dma(uart->rx_dma_channel);
		return -EBUSY;
	}

	set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart);
	set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart);

	uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
	uart->rx_dma_buf.head = 0;
	uart->rx_dma_buf.tail = 0;
	uart->rx_dma_nrows = 0;

	set_dma_config(uart->rx_dma_channel,
		set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
				INTR_ON_ROW, DIMENSION_2D,
				DATA_SIZE_8,
				DMA_SYNC_RESTART));
	set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT);
	set_dma_x_modify(uart->rx_dma_channel, 1);
	set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT);
	set_dma_y_modify(uart->rx_dma_channel, 1);
	set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf);
	enable_dma(uart->rx_dma_channel);

	uart->rx_dma_timer.data = (unsigned long)(uart);
	uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout;
	uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
	add_timer(&(uart->rx_dma_timer));
#else
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	if (kgdboc_port_line == uart->port.line && kgdboc_break_enabled)
		kgdboc_break_enabled = 0;
	else {
# endif
	if (request_irq(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED,
	     "BFIN_UART_RX", uart)) {
		printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n");
		return -EBUSY;
	}

	if (request_irq
	    (uart->port.irq+1, bfin_serial_tx_int, IRQF_DISABLED,
	     "BFIN_UART_TX", uart)) {
		printk(KERN_NOTICE "Unable to attach BlackFin UART TX interrupt\n");
		free_irq(uart->port.irq, uart);
		return -EBUSY;
	}

# ifdef CONFIG_BF54x
	{
		unsigned uart_dma_ch_rx, uart_dma_ch_tx;

		switch (uart->port.irq) {
		case IRQ_UART3_RX:
			uart_dma_ch_rx = CH_UART3_RX;
			uart_dma_ch_tx = CH_UART3_TX;
			break;
		case IRQ_UART2_RX:
			uart_dma_ch_rx = CH_UART2_RX;
			uart_dma_ch_tx = CH_UART2_TX;
			break;
		default:
			uart_dma_ch_rx = uart_dma_ch_tx = 0;
			break;
		};

		if (uart_dma_ch_rx &&
			request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) {
			printk(KERN_NOTICE"Fail to attach UART interrupt\n");
			free_irq(uart->port.irq, uart);
			free_irq(uart->port.irq + 1, uart);
			return -EBUSY;
		}
		if (uart_dma_ch_tx &&
			request_dma(uart_dma_ch_tx, "BFIN_UART_TX") < 0) {
			printk(KERN_NOTICE "Fail to attach UART interrupt\n");
			free_dma(uart_dma_ch_rx);
			free_irq(uart->port.irq, uart);
			free_irq(uart->port.irq + 1, uart);
			return -EBUSY;
		}
	}
# endif
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	}
# endif
#endif
	UART_SET_IER(uart, ERBFI);
	return 0;
}
Example #9
static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = NULL;
	unsigned int status, ch, flg;
	static struct timeval anomaly_start = { .tv_sec = 0 };

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	ch = UART_GET_CHAR(uart);
	uart->port.icount.rx++;

#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	if (kgdb_connected && kgdboc_port_line == uart->port.line)
		if (ch == 0x3) {/* Ctrl + C */
			kgdb_breakpoint();
			return;
		}

	if (!uart->port.info || !uart->port.info->tty)
		return;
#endif
	tty = uart->port.info->tty;

	if (ANOMALY_05000363) {
		/* The BF533 (and BF561) family of processors have a nice anomaly
		 * where they continuously generate characters for a "single" break.
		 * We have to basically ignore this flood until the "next" valid
		 * character comes across.  Due to the nature of the flood, it is
		 * not possible to reliably catch bytes that are sent too quickly
		 * after this break.  So application code talking to the Blackfin
		 * which sends a break signal must allow at least 1.5 character
		 * times after the end of the break for things to stabilize.  This
		 * timeout was picked as it must absolutely be larger than 1
		 * character time +/- some percent.  So 1.5 sounds good.  All other
		 * Blackfin families operate properly.  Woo.
		 */
		if (anomaly_start.tv_sec) {
			struct timeval curr;
			suseconds_t usecs;

			if ((~ch & (~ch + 1)) & 0xff)
				goto known_good_char;

			do_gettimeofday(&curr);
			if (curr.tv_sec - anomaly_start.tv_sec > 1)
				goto known_good_char;

			usecs = 0;
			if (curr.tv_sec != anomaly_start.tv_sec)
				usecs += USEC_PER_SEC;
			usecs += curr.tv_usec - anomaly_start.tv_usec;

			if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
				goto known_good_char;

			if (ch)
				anomaly_start.tv_sec = 0;
			else
				anomaly_start = curr;

			return;

 known_good_char:
			status &= ~BI;
			anomaly_start.tv_sec = 0;
		}
	}

	if (status & BI) {
		if (ANOMALY_05000363)
			if (bfin_revid() < 5)
				do_gettimeofday(&anomaly_start);
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	if (uart_handle_sysrq_char(&uart->port, ch))
		goto ignore_char;

	uart_insert_char(&uart->port, status, OE, ch, flg);

 ignore_char:
	tty_flip_buffer_push(tty);
}
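
The workaround above hinges on waiting roughly 1.5 character times after the break. As a rough standalone check, assuming 8N1 framing (start + 8 data + stop = 10 bit times per character), that threshold in microseconds works out as below; the helper name is hypothetical and is not the driver's UART_GET_ANOMALY_THRESHOLD().

#include <stdio.h>

static unsigned int anomaly_threshold_us(unsigned int baud)
{
	/* one character = 10 bit times; threshold = 1.5 characters, in microseconds */
	return (10u * 1000000u * 3u) / (baud * 2u);
}

int main(void)
{
	unsigned int rates[] = { 9600, 57600, 115200 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%6u baud -> ~%u us\n", rates[i],
		       anomaly_threshold_us(rates[i]));
	return 0;
}
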

static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.info->xmit;

	/*
	 * Check the modem control lines before
	 * transmitting anything.
	 */
	bfin_serial_mctrl_check(uart);

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
#ifdef CONFIG_BF54x
		/* Clear TFI bit */
		UART_PUT_LSR(uart, TFI);
#endif
		UART_CLEAR_IER(uart, ETBEI);
		return;
	}

	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
		UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uart->port.icount.tx++;
		SSYNC();
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uart->port);
}

static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

	spin_lock(&uart->port.lock);
	while (UART_GET_LSR(uart) & DR)
		bfin_serial_rx_chars(uart);
	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}

static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

	spin_lock(&uart->port.lock);
	if (UART_GET_LSR(uart) & THRE)
		bfin_serial_tx_chars(uart);
	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_SERIAL_BFIN_DMA
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.info->xmit;

	uart->tx_done = 0;

	/*
	 * Check the modem control lines before
	 * transmitting anything.
	 */
	bfin_serial_mctrl_check(uart);

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
		uart->tx_count = 0;
		uart->tx_done = 1;
		return;
	}

	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
		uart->tx_count = UART_XMIT_SIZE - xmit->tail;
	blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail),
					(unsigned long)(xmit->buf+xmit->tail+uart->tx_count));
	set_dma_config(uart->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF,
			DIMENSION_LINEAR,
			DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
	set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
	set_dma_x_modify(uart->tx_dma_channel, 1);
	enable_dma(uart->tx_dma_channel);

	UART_SET_IER(uart, ETBEI);
}

static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = uart->port.info->port.tty;
	int i, flg, status;

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	uart->port.icount.rx +=
		CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
		UART_XMIT_SIZE);

	if (status & BI) {
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto dma_ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	for (i = uart->rx_dma_buf.tail; i != uart->rx_dma_buf.head; i++) {
		if (i >= UART_XMIT_SIZE)
			i = 0;
		if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
			uart_insert_char(&uart->port, status, OE,
				uart->rx_dma_buf.buf[i], flg);
	}

 dma_ignore_char:
	tty_flip_buffer_push(tty);
}

void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
	int x_pos, pos;
	unsigned long flags;

	spin_lock_irqsave(&uart->port.lock, flags);

	uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
	x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
	uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
	if (uart->rx_dma_nrows == DMA_RX_YCOUNT)
		uart->rx_dma_nrows = 0;
	x_pos = DMA_RX_XCOUNT - x_pos;
	if (x_pos == DMA_RX_XCOUNT)
		x_pos = 0;

	pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
	if (pos != uart->rx_dma_buf.tail) {
		uart->rx_dma_buf.head = pos;
		bfin_serial_dma_rx_chars(uart);
		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
	}

	spin_unlock_irqrestore(&uart->port.lock, flags);

	mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
}
Example #10
static int bfin_serial_startup(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

#ifdef CONFIG_SERIAL_BFIN_DMA
	dma_addr_t dma_handle;

	if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) {
		printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n");
		return -EBUSY;
	}

	if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) {
		printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n");
		free_dma(uart->rx_dma_channel);
		return -EBUSY;
	}

	set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart);
	set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart);

	uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
	uart->rx_dma_buf.head = 0;
	uart->rx_dma_buf.tail = 0;
	uart->rx_dma_nrows = 0;

	set_dma_config(uart->rx_dma_channel,
		set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
				INTR_ON_ROW, DIMENSION_2D,
				DATA_SIZE_8,
				DMA_SYNC_RESTART));
	set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT);
	set_dma_x_modify(uart->rx_dma_channel, 1);
	set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT);
	set_dma_y_modify(uart->rx_dma_channel, 1);
	set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf);
	enable_dma(uart->rx_dma_channel);

	uart->rx_dma_timer.data = (unsigned long)(uart);
	uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout;
	uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
	add_timer(&(uart->rx_dma_timer));
#else
# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	if (kgdboc_port_line == uart->port.line && kgdboc_break_enabled)
		kgdboc_break_enabled = 0;
	else {
# endif
	if (request_irq(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED,
	     "BFIN_UART_RX", uart)) {
		printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n");
		return -EBUSY;
	}

	if (request_irq
	    (uart->port.irq+1, bfin_serial_tx_int, IRQF_DISABLED,
	     "BFIN_UART_TX", uart)) {
		printk(KERN_NOTICE "Unable to attach BlackFin UART TX interrupt\n");
		free_irq(uart->port.irq, uart);
		return -EBUSY;
	}

# ifdef CONFIG_BF54x
	{
		/*
		 * UART2 and UART3 on the BF548 share interrupt pins and DMA
		 * controllers with SPORT2 and SPORT3. UART RX and TX
		 * interrupts are generated in PIO mode only when their
		 * peripheral mapping registers are configured properly,
		 * which means the corresponding DMA channels must be
		 * requested in PIO mode as well.
		 */
		unsigned uart_dma_ch_rx, uart_dma_ch_tx;

		switch (uart->port.irq) {
		case IRQ_UART3_RX:
			uart_dma_ch_rx = CH_UART3_RX;
			uart_dma_ch_tx = CH_UART3_TX;
			break;
		case IRQ_UART2_RX:
			uart_dma_ch_rx = CH_UART2_RX;
			uart_dma_ch_tx = CH_UART2_TX;
			break;
		default:
			uart_dma_ch_rx = uart_dma_ch_tx = 0;
			break;
		};

		if (uart_dma_ch_rx &&
			request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) {
			printk(KERN_NOTICE"Fail to attach UART interrupt\n");
			free_irq(uart->port.irq, uart);
			free_irq(uart->port.irq + 1, uart);
			return -EBUSY;
		}
		if (uart_dma_ch_tx &&
			request_dma(uart_dma_ch_tx, "BFIN_UART_TX") < 0) {
			printk(KERN_NOTICE "Fail to attach UART interrupt\n");
			free_dma(uart_dma_ch_rx);
			free_irq(uart->port.irq, uart);
			free_irq(uart->port.irq + 1, uart);
			return -EBUSY;
		}
	}
# endif
# if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	}
# endif
#endif

#ifdef CONFIG_SERIAL_BFIN_CTSRTS
	if (uart->cts_pin >= 0) {
		if (request_irq(gpio_to_irq(uart->cts_pin),
			bfin_serial_mctrl_cts_int,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
			uart->cts_pin = -1;
			pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
		}
	}
	if (uart->rts_pin >= 0) {
		gpio_direction_output(uart->rts_pin, 0);
	}
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
	if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
		bfin_serial_mctrl_cts_int,
		IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
		uart->cts_pin = -1;
		pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
	}

	/* CTS RTS PINs are negative assertive. */
	UART_PUT_MCR(uart, ACTS);
	UART_SET_IER(uart, EDSSI);
#endif

	UART_SET_IER(uart, ERBFI);
	return 0;
}
Example #11
static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = NULL;
	unsigned int status, ch, flg;
	static struct timeval anomaly_start = { .tv_sec = 0 };

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	ch = UART_GET_CHAR(uart);
	uart->port.icount.rx++;

#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	if (kgdb_connected && kgdboc_port_line == uart->port.line
		&& kgdboc_break_enabled)
		if (ch == 0x3) {/* Ctrl + C */
			kgdb_breakpoint();
			return;
		}

	if (!uart->port.state || !uart->port.state->port.tty)
		return;
#endif
	tty = uart->port.state->port.tty;

	if (ANOMALY_05000363) {
		/* The BF533 (and BF561) family of processors have a nice anomaly
		 * where they continuously generate characters for a "single" break.
		 * We have to basically ignore this flood until the "next" valid
		 * character comes across.  Due to the nature of the flood, it is
		 * not possible to reliably catch bytes that are sent too quickly
		 * after this break.  So application code talking to the Blackfin
		 * which sends a break signal must allow at least 1.5 character
		 * times after the end of the break for things to stabilize.  This
		 * timeout was picked as it must absolutely be larger than 1
		 * character time +/- some percent.  So 1.5 sounds good.  All other
		 * Blackfin families operate properly.  Woo.
		 */
		if (anomaly_start.tv_sec) {
			struct timeval curr;
			suseconds_t usecs;

			if ((~ch & (~ch + 1)) & 0xff)
				goto known_good_char;

			do_gettimeofday(&curr);
			if (curr.tv_sec - anomaly_start.tv_sec > 1)
				goto known_good_char;

			usecs = 0;
			if (curr.tv_sec != anomaly_start.tv_sec)
				usecs += USEC_PER_SEC;
			usecs += curr.tv_usec - anomaly_start.tv_usec;

			if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
				goto known_good_char;

			if (ch)
				anomaly_start.tv_sec = 0;
			else
				anomaly_start = curr;

			return;

 known_good_char:
			status &= ~BI;
			anomaly_start.tv_sec = 0;
		}
	}

	if (status & BI) {
		if (ANOMALY_05000363)
			if (bfin_revid() < 5)
				do_gettimeofday(&anomaly_start);
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	if (uart_handle_sysrq_char(&uart->port, ch))
		goto ignore_char;

	uart_insert_char(&uart->port, status, OE, ch, flg);

 ignore_char:
	tty_flip_buffer_push(tty);
}

static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.state->xmit;

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
#ifdef CONFIG_BF54x
		/* Clear TFI bit */
		UART_PUT_LSR(uart, TFI);
#endif
		/* Anomaly notes:
		 *  05000215 -	we always clear ETBEI within the last UART TX
		 *		interrupt to end a string. It is always set
		 *		when starting a new TX.
		 */
		UART_CLEAR_IER(uart, ETBEI);
		return;
	}

	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
		UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uart->port.icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uart->port);
}

static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

	while (UART_GET_LSR(uart) & DR)
		bfin_serial_rx_chars(uart);

	return IRQ_HANDLED;
}

static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
	if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
		uart->scts = 0;
		uart_handle_cts_change(&uart->port, uart->scts);
	}
#endif
	spin_lock(&uart->port.lock);
	if (UART_GET_LSR(uart) & THRE)
		bfin_serial_tx_chars(uart);
	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_SERIAL_BFIN_DMA
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.state->xmit;

	uart->tx_done = 0;

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
		uart->tx_count = 0;
		uart->tx_done = 1;
		return;
	}

	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
		uart->tx_count = UART_XMIT_SIZE - xmit->tail;
	blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail),
					(unsigned long)(xmit->buf+xmit->tail+uart->tx_count));
	set_dma_config(uart->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF,
			DIMENSION_LINEAR,
			DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
	set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
	set_dma_x_modify(uart->tx_dma_channel, 1);
	SSYNC();
	enable_dma(uart->tx_dma_channel);

	UART_SET_IER(uart, ETBEI);
}

static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = uart->port.state->port.tty;
	int i, flg, status;

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	uart->port.icount.rx +=
		CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
		UART_XMIT_SIZE);

	if (status & BI) {
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto dma_ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	for (i = uart->rx_dma_buf.tail; ; i++) {
		if (i >= UART_XMIT_SIZE)
			i = 0;
		if (i == uart->rx_dma_buf.head)
			break;
		if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
			uart_insert_char(&uart->port, status, OE,
				uart->rx_dma_buf.buf[i], flg);
	}

 dma_ignore_char:
	tty_flip_buffer_push(tty);
}

void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
	int x_pos, pos;

	dma_disable_irq_nosync(uart->rx_dma_channel);
	spin_lock_bh(&uart->rx_lock);

	/* A 2D DMA RX buffer ring is used. Because curr_y_count and
	 * curr_x_count cannot be read as one atomic operation,
	 * curr_y_count should be read before curr_x_count. By the time
	 * curr_x_count is read, curr_y_count may already indicate the
	 * next buffer line, while the position calculated here still
	 * refers to the old line. The resulting wrong position may be
	 * smaller than the current buffer tail, which causes garbage
	 * to be received if it is not rejected.
	 */
	uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
	x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
	uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
	if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
		uart->rx_dma_nrows = 0;
	x_pos = DMA_RX_XCOUNT - x_pos;
	if (x_pos == DMA_RX_XCOUNT)
		x_pos = 0;

	pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
	/* Ignore the received data if the new position is in the same
	 * line as the current buffer tail and is smaller than it.
	 */
	if (pos > uart->rx_dma_buf.tail ||
		uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
		uart->rx_dma_buf.head = pos;
		bfin_serial_dma_rx_chars(uart);
		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
	}

	spin_unlock_bh(&uart->rx_lock);
	dma_enable_irq(uart->rx_dma_channel);

	mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
}
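
The position bookkeeping in bfin_serial_rx_dma_timeout() can be isolated into plain arithmetic: the hardware counters count down from the programmed X/Y sizes, so the filled position in the ring is the programmed size minus the current count, with the wrap cases forced back to zero. A small standalone sketch follows, using made-up counter values and illustrative names (RX_XCOUNT, RX_YCOUNT, ring_pos) rather than the driver's own.

#include <stdio.h>

#define RX_XCOUNT 512
#define RX_YCOUNT 8     /* ring size = RX_XCOUNT * RX_YCOUNT bytes */

static int ring_pos(int curr_ycount, int curr_xcount)
{
	/* counters count down, so convert them to "how far we have got" */
	int rows = RX_YCOUNT - curr_ycount;
	int x = RX_XCOUNT - curr_xcount;

	/* handle the wrap cases exactly as the timeout handler does */
	if (rows == RX_YCOUNT || curr_xcount == 0)
		rows = 0;
	if (x == RX_XCOUNT)
		x = 0;
	return rows * RX_XCOUNT + x;
}

int main(void)
{
	/* e.g. hardware reports 6 rows and 200 transfers still to go in this row */
	printf("head position = %d\n", ring_pos(6, 200));
	return 0;
}
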