Example #1
static int rs_chars_in_buffer(struct tty_struct *tty)
{
	struct async_struct *info = (struct async_struct *)tty->driver_data;

	return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
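
Every example on this page leans on the same circular-buffer macros. For reference, the Linux kernel defines CIRC_CNT and CIRC_SPACE in include/linux/circ_buf.h; the buffer size must be a power of two for the masking arithmetic to be valid:

/* From include/linux/circ_buf.h. Return the number of items in the
 * buffer; size must be a power of two for the mask to work. */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

/* Return the space available, 0..size-1. One slot is always left
 * free so that head == tail unambiguously means "empty". */
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))
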
Example #2
int dfd_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
#ifndef DFD_SHRINKER_DISABLE_DFD_ON_MEM_PRESSURE
	struct dfd_node *pentry;
	unsigned long nr = sc->nr_to_scan;
#endif
	unsigned long flags;
	unsigned long nr_objs;

	spin_lock_irqsave(&dfd_list_lock, flags);
	nr_objs = CIRC_CNT(dfd_node_list.head, dfd_node_list.tail,
		KFREE_CIRC_BUF_SIZE);
	spin_unlock_irqrestore(&dfd_list_lock, flags);

	/* nothing to reclaim from here */
	if (nr_objs == 0) {
		nr_objs = -1;
		goto out;
	}

#ifdef DFD_SHRINKER_DISABLE_DFD_ON_MEM_PRESSURE
	/* Disable double-free detection. This flushes
	 * the entire circular buffer out. */
	dfd_disable();
#else
	/* return max slab objects freeable */
	if (nr == 0)
		return nr_objs;

	if (nr > nr_objs)
		nr = nr_objs;

	pr_debug("%s: nr_objs=%lu\n", __func__, nr_objs);
	while (nr) {
		unsigned long cnt;
		void *tofree = NULL;
		spin_lock_irqsave(&dfd_list_lock, flags);
		cnt = CIRC_CNT(dfd_node_list.head, dfd_node_list.tail,
			KFREE_CIRC_BUF_SIZE);
		if (cnt > 0) {
			pentry = circ_buf_get(&dfd_node_list);
			if (pentry != NULL)
				tofree = pentry->addr;
		}
		spin_unlock_irqrestore(&dfd_list_lock, flags);
		if (tofree)
			kfree((void *)((unsigned long)tofree |
				KFREE_HOOK_BYPASS_MASK));
		nr--;
	}
#endif
	spin_lock_irqsave(&dfd_list_lock, flags);
	nr_objs = CIRC_CNT(dfd_node_list.head, dfd_node_list.tail,
		KFREE_CIRC_BUF_SIZE);
	spin_unlock_irqrestore(&dfd_list_lock, flags);
	if (nr_objs == 0) {
		pr_info("%s: nothing more to reclaim from here!\n", __func__);
		nr_objs = -1;
	}

out:
	return nr_objs;
}
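
circ_buf_get() is called above but not shown in this snippet. A minimal sketch of what it might look like, assuming dfd_node_list is a ring of struct dfd_node entries indexed by head/tail and the caller already holds dfd_list_lock (the struct name dfd_circ_buf is made up for illustration):

/* Hypothetical sketch -- the real circ_buf_get() is not shown above.
 * Assumes the caller holds dfd_list_lock. */
static struct dfd_node *circ_buf_get(struct dfd_circ_buf *cb)
{
	struct dfd_node *pentry;

	/* Empty ring: nothing to hand back. */
	if (CIRC_CNT(cb->head, cb->tail, KFREE_CIRC_BUF_SIZE) == 0)
		return NULL;

	pentry = &cb->buf[cb->tail];
	/* KFREE_CIRC_BUF_SIZE must be a power of two for this mask. */
	cb->tail = (cb->tail + 1) & (KFREE_CIRC_BUF_SIZE - 1);
	return pentry;
}
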
Example #3
//-----------------------------------------------------------------------------
/// EMAC Interrupt handler
//-----------------------------------------------------------------------------
void EMAC_Handler(void)
{
    volatile EmacTxTDescriptor *pTxTd;
    volatile EMAC_TxCallback   *pTxCb;
    unsigned int isr;
    unsigned int rsr;
    unsigned int tsr;
    unsigned int rxStatusFlag;
    unsigned int txStatusFlag;

    //TRACE_DEBUG("EMAC_Handler\n\r");
    isr = AT91C_BASE_EMAC->EMAC_ISR & AT91C_BASE_EMAC->EMAC_IMR;
    rsr = AT91C_BASE_EMAC->EMAC_RSR;
    tsr = AT91C_BASE_EMAC->EMAC_TSR;

    // RX packet
    if ((isr & AT91C_EMAC_RCOMP) || (rsr & AT91C_EMAC_REC)) {
        rxStatusFlag = AT91C_EMAC_REC;

        // Frame received
        EmacStatistics.rx_packets++;

        // Check OVR
        if (rsr & AT91C_EMAC_OVR) {
            rxStatusFlag |= AT91C_EMAC_OVR;
            EmacStatistics.rx_ovrs++;
        }
        // Check BNA
        if (rsr & AT91C_EMAC_BNA) {
            rxStatusFlag |= AT91C_EMAC_BNA;
            EmacStatistics.rx_bnas++;
        }
        // Clear status
        AT91C_BASE_EMAC->EMAC_RSR |= rxStatusFlag;

        // Invoke callbacks
        if (rxTd.rxCb) {
            rxTd.rxCb(rxStatusFlag);
        }
    }

    // TX packet
    if ((isr & AT91C_EMAC_TCOMP) || (tsr & AT91C_EMAC_COMP)) {

        txStatusFlag = AT91C_EMAC_COMP;
        EmacStatistics.tx_comp++;

        // A frame transmitted
        // Check RLE
        if (tsr & AT91C_EMAC_RLES) {
            txStatusFlag |= AT91C_EMAC_RLES;
            EmacStatistics.tx_errors++;
        }
        // Check COL
        if (tsr & AT91C_EMAC_COL) {
            txStatusFlag |= AT91C_EMAC_COL;
            EmacStatistics.collisions++;
        }
        // Check BEX
        if (tsr & AT91C_EMAC_BEX) {
            txStatusFlag |= AT91C_EMAC_BEX;
            EmacStatistics.tx_exausts++;
        }
        // Check UND
        if (tsr & AT91C_EMAC_UND) {
            txStatusFlag |= AT91C_EMAC_UND;
            EmacStatistics.tx_underruns++;
        }
        // Clear status
        AT91C_BASE_EMAC->EMAC_TSR |= txStatusFlag;

        // Sanity check: Tx buffers have to be scheduled
        ASSERT(!CIRC_EMPTY(&txTd),
               "-F- EMAC Tx interrupt received meanwhile no TX buffers has been scheduled\n\r");

        // Check the buffers
        while (CIRC_CNT(txTd.head, txTd.tail, TX_BUFFERS)) {
            pTxTd = txTd.td + txTd.tail;
            pTxCb = txTd.txCb + txTd.tail;

            // Exit if buffer has not been sent yet
            if ((pTxTd->status & EMAC_TX_USED_BIT) == 0) {
                break;
            }

            // Notify upper layer that packet has been sent
            if (*pTxCb) {
                (*pTxCb)(txStatusFlag);
            }

            CIRC_INC(txTd.tail, TX_BUFFERS);
        }

        // If a wakeup has been scheduled, notify the upper layer that it can
        // send more packets; the send will be successful.
        if ((CIRC_SPACE(txTd.head, txTd.tail, TX_BUFFERS) >= txTd.wakeupThreshold)
                && txTd.wakeupCb) {
            txTd.wakeupCb();
        }
    }
}
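
CIRC_EMPTY and CIRC_INC here are at91lib helpers, not the kernel macros, and their definitions are outside the snippet. Plausible sketches consistent with the call sites (CIRC_EMPTY takes a pointer to the descriptor list, CIRC_INC assigns to its first argument); these are not the verbatim at91lib definitions:

// Sketches only; not verbatim at91lib source.
#define CIRC_EMPTY(circ)        ((circ)->head == (circ)->tail)
#define CIRC_INC(index, size)   ((index) = ((index) + 1) % (size))
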
Example #4
/*!
 * This function implements IOCTL controls on a PMIC device.
 *
 * @param        inode       pointer to the inode
 * @param        file        pointer to the file
 * @param        cmd         the ioctl command
 * @param        arg         the argument
 * @return       This function returns 0 if successful.
 */
static int pmic_dev_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	register_info reg_info;
	pmic_event_callback_t event_sub;
	type_event event = EVENT_NB;
	int ret = 0;

	if (_IOC_TYPE(cmd) != 'P')
		return -ENOTTY;

	switch (cmd) {
	case PMIC_READ_REG:
		if (copy_from_user(&reg_info, (register_info *) arg,
				   sizeof(register_info))) {
			return -EFAULT;
		}
		ret = pmic_read_reg(reg_info.reg, &reg_info.reg_value,
				    0x00ffffff);
		pr_debug("read reg %d %x\n", reg_info.reg, reg_info.reg_value);
		if (copy_to_user((register_info *) arg, &reg_info,
				 sizeof(register_info))) {
			return -EFAULT;
		}
		break;

	case PMIC_WRITE_REG:
		if (copy_from_user(&reg_info, (register_info *) arg,
				   sizeof(register_info))) {
			return -EFAULT;
		}
		ret = pmic_write_reg(reg_info.reg, reg_info.reg_value,
				     0x00ffffff);
		pr_debug("write reg %d %x\n", reg_info.reg, reg_info.reg_value);
		if (copy_to_user((register_info *) arg, &reg_info,
				 sizeof(register_info))) {
			return -EFAULT;
		}
		break;

	case PMIC_SUBSCRIBE:
		if (get_user(event, (int __user *)arg)) {
			return -EFAULT;
		}
		event_sub.func = callbackfn;
		event_sub.param = (void *)event;
		ret = pmic_event_subscribe(event, event_sub);
		pr_debug("subscribe done\n");
		break;

	case PMIC_UNSUBSCRIBE:
		if (get_user(event, (int __user *)arg)) {
			return -EFAULT;
		}
		event_sub.func = callbackfn;
		event_sub.param = (void *)event;
		ret = pmic_event_unsubscribe(event, event_sub);
		pr_debug("unsubscribe done\n");
		break;

	case PMIC_NOTIFY_USER:
		if (get_user(event, (int __user *)arg)) {
			return -EFAULT;
		}
		event_sub.func = user_notify_callback;
		event_sub.param = (void *)event;
		ret = pmic_event_subscribe(event, event_sub);
		break;

	case PMIC_GET_NOTIFY:
		down(&event_mutex);
		if (CIRC_CNT(pmic_events.head, pmic_events.tail, CIRC_BUF_MAX)) {
			event = (int)pmic_events.buf[pmic_events.tail];
			pmic_events.tail = (pmic_events.tail + 1) & (CIRC_BUF_MAX - 1);
		} else {
			pr_info("No valid notified event\n");
		}
		up(&event_mutex);

		if (put_user(event, (int __user *)arg)) {
			return -EFAULT;
		}
		break;

	default:
		printk(KERN_ERR "unsupported ioctl command %d\n", (int)cmd);
		return -EINVAL;
	}

	return ret;
}
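
PMIC_GET_NOTIFY only drains the pmic_events ring; the matching producer is not part of the snippet. A hypothetical sketch of how user_notify_callback might enqueue an event, assuming pmic_events is a CIRC_BUF_MAX-sized (power-of-two) ring guarded by event_mutex:

/* Hypothetical producer; the real user_notify_callback is not shown.
 * Drops the event if the ring is full. */
static void user_notify_callback(void *param)
{
	down(&event_mutex);
	if (CIRC_SPACE(pmic_events.head, pmic_events.tail, CIRC_BUF_MAX)) {
		pmic_events.buf[pmic_events.head] = (char)(long)param;
		pmic_events.head = (pmic_events.head + 1) & (CIRC_BUF_MAX - 1);
	}
	up(&event_mutex);
}
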
Example #5
static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = NULL;
	unsigned int status, ch, flg;
	static struct timeval anomaly_start = { .tv_sec = 0 };

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	ch = UART_GET_CHAR(uart);
	uart->port.icount.rx++;

#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	if (kgdb_connected && kgdboc_port_line == uart->port.line)
		if (ch == 0x3) {/* Ctrl + C */
			kgdb_breakpoint();
			return;
		}

	if (!uart->port.info || !uart->port.info->port.tty)
		return;
#endif
	tty = uart->port.info->port.tty;

	if (ANOMALY_05000363) {
		/* The BF533 (and BF561) family of processors have a nice anomaly
		 * where they continuously generate characters for a "single" break.
		 * We have to basically ignore this flood until the "next" valid
		 * character comes across.  Due to the nature of the flood, it is
		 * not possible to reliably catch bytes that are sent too quickly
		 * after this break.  So application code talking to the Blackfin
		 * which sends a break signal must allow at least 1.5 character
		 * times after the end of the break for things to stabilize.  This
		 * timeout was picked as it must absolutely be larger than 1
		 * character time +/- some percent.  So 1.5 sounds good.  All other
		 * Blackfin families operate properly.  Woo.
		 */
		if (anomaly_start.tv_sec) {
			struct timeval curr;
			suseconds_t usecs;

			if ((~ch & (~ch + 1)) & 0xff)
				goto known_good_char;

			do_gettimeofday(&curr);
			if (curr.tv_sec - anomaly_start.tv_sec > 1)
				goto known_good_char;

			usecs = 0;
			if (curr.tv_sec != anomaly_start.tv_sec)
				usecs += USEC_PER_SEC;
			usecs += curr.tv_usec - anomaly_start.tv_usec;

			if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
				goto known_good_char;

			if (ch)
				anomaly_start.tv_sec = 0;
			else
				anomaly_start = curr;

			return;

 known_good_char:
			status &= ~BI;
			anomaly_start.tv_sec = 0;
		}
	}

	if (status & BI) {
		if (ANOMALY_05000363)
			if (bfin_revid() < 5)
				do_gettimeofday(&anomaly_start);
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	if (uart_handle_sysrq_char(&uart->port, ch))
		goto ignore_char;

	uart_insert_char(&uart->port, status, OE, ch, flg);

 ignore_char:
	tty_flip_buffer_push(tty);
}

static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.info->xmit;

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
#ifdef CONFIG_BF54x
		/* Clear TFI bit */
		UART_PUT_LSR(uart, TFI);
#endif
		UART_CLEAR_IER(uart, ETBEI);
		return;
	}

	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
		UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uart->port.icount.tx++;
		SSYNC();
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uart->port);
}

static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

	spin_lock(&uart->port.lock);
	while (UART_GET_LSR(uart) & DR)
		bfin_serial_rx_chars(uart);
	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}

static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
	if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
		uart->scts = 0;
		uart_handle_cts_change(&uart->port, uart->scts);
	}
#endif
	spin_lock(&uart->port.lock);
	if (UART_GET_LSR(uart) & THRE)
		bfin_serial_tx_chars(uart);
	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_SERIAL_BFIN_DMA
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.info->xmit;

	uart->tx_done = 0;

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
		uart->tx_count = 0;
		uart->tx_done = 1;
		return;
	}

	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
		uart->tx_count = UART_XMIT_SIZE - xmit->tail;
	blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail),
					(unsigned long)(xmit->buf+xmit->tail+uart->tx_count));
	set_dma_config(uart->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF,
			DIMENSION_LINEAR,
			DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
	set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
	set_dma_x_modify(uart->tx_dma_channel, 1);
	enable_dma(uart->tx_dma_channel);

	UART_SET_IER(uart, ETBEI);
}

static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = uart->port.info->port.tty;
	int i, flg, status;

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	uart->port.icount.rx +=
		CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
		UART_XMIT_SIZE);

	if (status & BI) {
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto dma_ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	for (i = uart->rx_dma_buf.tail; ; i++) {
		if (i >= UART_XMIT_SIZE)
			i = 0;
		if (i == uart->rx_dma_buf.head)
			break;
		if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
			uart_insert_char(&uart->port, status, OE,
				uart->rx_dma_buf.buf[i], flg);
	}

 dma_ignore_char:
	tty_flip_buffer_push(tty);
}

void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
	int x_pos, pos;
	unsigned long flags;

	spin_lock_irqsave(&uart->port.lock, flags);

	uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
	x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
	uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
	if (uart->rx_dma_nrows == DMA_RX_YCOUNT)
		uart->rx_dma_nrows = 0;
	x_pos = DMA_RX_XCOUNT - x_pos;
	if (x_pos == DMA_RX_XCOUNT)
		x_pos = 0;

	pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
	if (pos != uart->rx_dma_buf.tail) {
		uart->rx_dma_buf.head = pos;
		bfin_serial_dma_rx_chars(uart);
		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
	}

	spin_unlock_irqrestore(&uart->port.lock, flags);

	mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
}
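
In bfin_serial_dma_tx_chars() above, tx_count is clamped to UART_XMIT_SIZE - xmit->tail because the DMA engine consumes one linear chunk; the wrapped remainder goes out on a later TX interrupt. The kernel ships a macro in include/linux/circ_buf.h that computes exactly this contiguous count:

/* From include/linux/circ_buf.h: items available as one contiguous
 * run starting at tail, i.e. min(CIRC_CNT, size - tail). */
#define CIRC_CNT_TO_END(head, tail, size) \
	({ int end = (size) - (tail); \
	   int n = ((head) + end) & ((size) - 1); \
	   n < end ? n : end; })
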
Example #6
static int spi_tty_buf_data_avail(struct circ_buf *cb)
{
	return CIRC_CNT(cb->head, cb->tail, SPI_TTY_BUF_SIZE);
}
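
The natural companion to this helper is a free-space query; a minimal sketch using CIRC_SPACE (the name spi_tty_buf_space_avail is made up for symmetry, not taken from the driver):

static int spi_tty_buf_space_avail(struct circ_buf *cb)
{
	/* Bytes writable before head would catch tail; CIRC_SPACE keeps
	 * one slot free so head == tail always means "empty". */
	return CIRC_SPACE(cb->head, cb->tail, SPI_TTY_BUF_SIZE);
}
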
Example #7
/**
 * \brief EMAC Interrupt handler.
 *
 * \param p_emac_dev   Pointer to EMAC device instance.
 */
void emac_handler(emac_device_t* p_emac_dev)
{
	Emac *p_hw = p_emac_dev->p_hw;

	emac_tx_descriptor_t *p_tx_td;
	emac_dev_tx_cb_t *p_tx_cb;
	volatile uint32_t ul_isr;
	volatile uint32_t ul_rsr;
	volatile uint32_t ul_tsr;
	uint32_t ul_rx_status_flag;
	uint32_t ul_tx_status_flag;

	ul_isr = emac_get_interrupt_status(p_hw);
	ul_rsr = emac_get_rx_status(p_hw);
	ul_tsr = emac_get_tx_status(p_hw);

	ul_isr &= ~(emac_get_interrupt_mask(p_hw) | 0xFFC300);

	/* RX packet */
	if ((ul_isr & EMAC_ISR_RCOMP) || (ul_rsr & EMAC_RSR_REC)) {
		ul_rx_status_flag = EMAC_RSR_REC;

		/* Check OVR */
		if (ul_rsr & EMAC_RSR_OVR) {
			ul_rx_status_flag |= EMAC_RSR_OVR;
		}
		/* Check BNA */
		if (ul_rsr & EMAC_RSR_BNA) {
			ul_rx_status_flag |= EMAC_RSR_BNA;
		}
		/* Clear status */
		emac_clear_rx_status(p_hw, ul_rx_status_flag);

		/* Invoke callbacks */
		if (p_emac_dev->func_rx_cb) {
			p_emac_dev->func_rx_cb(ul_rx_status_flag);
		}
	}

	/* TX packet */
	if ((ul_isr & EMAC_ISR_TCOMP) || (ul_tsr & EMAC_TSR_COMP)) {

		ul_tx_status_flag = EMAC_TSR_COMP;

		/* A frame transmitted */

		/* Check RLE */
		if (ul_tsr & EMAC_TSR_RLES) {
			/* Status RLE & Number of discarded buffers */
			ul_tx_status_flag = EMAC_TSR_RLES | CIRC_CNT(p_emac_dev->us_tx_head,
					p_emac_dev->us_tx_tail, p_emac_dev->us_tx_list_size);
			p_tx_cb = &p_emac_dev->func_tx_cb_list[p_emac_dev->us_tx_tail];
			emac_reset_tx_mem(p_emac_dev);
			emac_enable_transmit(p_hw, 1);
		}
		/* Check COL */
		if (ul_tsr & EMAC_TSR_COL) {
			ul_tx_status_flag |= EMAC_TSR_COL;
		}
		/* Check BEX */
		if (ul_tsr & EMAC_TSR_BEX) {
			ul_tx_status_flag |= EMAC_TSR_BEX;
		}
		/* Check UND */
		if (ul_tsr & EMAC_TSR_UND) {
			ul_tx_status_flag |= EMAC_TSR_UND;
		}
		/* Clear status */
		emac_clear_tx_status(p_hw, ul_tx_status_flag);

		if (!CIRC_EMPTY(p_emac_dev->us_tx_head, p_emac_dev->us_tx_tail)) {
			/* Check the buffers */
			do {
				p_tx_td = &p_emac_dev->p_tx_dscr[p_emac_dev->us_tx_tail];
				p_tx_cb = &p_emac_dev->func_tx_cb_list[p_emac_dev->us_tx_tail];
				/* Any error? Exit if buffer has not been sent yet */
				if ((p_tx_td->status.val & EMAC_TXD_USED) == 0) {
					break;
				}

				/* Notify upper layer that a packet has been sent */
				if (*p_tx_cb) {
					(*p_tx_cb) (ul_tx_status_flag);
				}

				circ_inc(&p_emac_dev->us_tx_tail, p_emac_dev->us_tx_list_size);
			} while (CIRC_CNT(p_emac_dev->us_tx_head, p_emac_dev->us_tx_tail,
							p_emac_dev->us_tx_list_size));
		}

		if (ul_tsr & EMAC_TSR_RLES) {
			/* Notify upper layer RLE */
			if (*p_tx_cb) {
				(*p_tx_cb) (ul_tx_status_flag);
			}
		}

		/* If a wakeup has been scheduled, notify the upper layer that
		   it can send more packets; the send will be successful. */
		if ((CIRC_SPACE(p_emac_dev->us_tx_head, p_emac_dev->us_tx_tail,
				p_emac_dev->us_tx_list_size) >= p_emac_dev->uc_wakeup_threshold)
				&& p_emac_dev->func_wakeup_cb) {
			p_emac_dev->func_wakeup_cb();
		}
	}
}
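
circ_inc() and the two-argument CIRC_EMPTY used here are ASF helpers whose definitions fall outside the snippet. Plausible sketches, consistent with the call sites but not verbatim ASF source:

/* Sketches only; not verbatim ASF source. */
#define CIRC_EMPTY(head, tail)  ((head) == (tail))

static void circ_inc(uint16_t *p_index, uint16_t us_size)
{
	if (++(*p_index) >= us_size)
		*p_index = 0;
}
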
Example #8
/**
 * \brief Get the current transmit load.
 *
 * \param p_emac_dev Pointer to the EMAC device instance.
 *
 * \return The current transmit load (number of frames queued).
 */
uint32_t emac_dev_get_tx_load(emac_device_t* p_emac_dev)
{
	uint16_t us_head = p_emac_dev->us_tx_head;
	uint16_t us_tail = p_emac_dev->us_tx_tail;
	return CIRC_CNT(us_head, us_tail, p_emac_dev->us_tx_list_size);
}
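
A hedged usage sketch: throttling a sender until the TX ring drains below a watermark. The function name and the watermark parameter are illustrative, not ASF API:

/* Illustrative only; busy-waits until fewer than ul_max frames are
 * queued. The EMAC TX interrupt handler advances us_tx_tail. */
static void wait_for_tx_drain(emac_device_t *p_emac_dev, uint32_t ul_max)
{
	while (emac_dev_get_tx_load(p_emac_dev) >= ul_max) {
		/* spin */
	}
}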