Example #1
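All of the examples on this page use the circular-buffer helpers from the Linux kernel's include/linux/circ_buf.h. For reference, the structure and macros (copied from that header) are:

struct circ_buf {
	char *buf;
	int head;
	int tail;
};

/* Return count in buffer. */
#define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1))

/* Return space available, 0..size-1. We always leave one free char
   as a completely full buffer has head == tail, which is the same as
   empty. */
#define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size))

/* Return count up to the end of the buffer. */
#define CIRC_CNT_TO_END(head,tail,size) \
	({int end = (size) - (tail); \
	  int n = ((head) + end) & ((size)-1); \
	  n < end ? n : end;})

/* Return space available up to the end of the buffer. */
#define CIRC_SPACE_TO_END(head,tail,size) \
	({int end = (size) - 1 - (head); \
	  int n = (end + (tail)) & ((size)-1); \
	  n <= end ? n : end+1;})

All four macros assume size is a power of two. One slot is always left unused, so a full ring (tail == head + 1, modulo size) can be told apart from an empty one (head == tail).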
/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%d},%ld",
	       CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz),
	       *timeo);

	add_wait_queue(&call->tx_waitq, &myself);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = 0;
		if (CIRC_SPACE(call->acks_head, call->acks_tail,
			       call->acks_winsz) > 0)
			break;
		if (signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}

		release_sock(&rx->sk);
		*timeo = schedule_timeout(*timeo);
		lock_sock(&rx->sk);
	}

	remove_wait_queue(&call->tx_waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}
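The loop above is only the sleeping half of the handshake. The waking half is not shown on this page; a minimal sketch of what the ACK-processing side would need to do (hypothetical, not taken from the rxrpc source) is:

	/* hypothetical consumer side: free a window slot, then wake any writer */
	call->acks_tail = (call->acks_tail + 1) & (call->acks_winsz - 1);
	wake_up(&call->tx_waitq);

Without a wake_up() on call->tx_waitq, the writer would only recheck CIRC_SPACE when schedule_timeout() expires.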
Example #2
static int rs_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct serial_state *info;
	unsigned long flags;

	info = tty->driver_data;

	if (serial_paranoia_check(info, tty->name, "rs_put_char"))
		return 0;

	if (!info->xmit.buf)
		return 0;

	local_irq_save(flags);
	if (CIRC_SPACE(info->xmit.head,
		       info->xmit.tail,
		       SERIAL_XMIT_SIZE) == 0) {
		local_irq_restore(flags);
		return 0;
	}

	info->xmit.buf[info->xmit.head++] = ch;
	info->xmit.head &= SERIAL_XMIT_SIZE-1;
	local_irq_restore(flags);
	return 1;
}
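Example #3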
static void ev3_uart_receive_buf(struct tty_struct *tty,
				 const unsigned char *cp, char *fp, int count)
{
	struct ev3_uart_port_data *port = tty->disc_data;
	struct circ_buf *cb = &port->circ_buf;
	int size;

	if (port->closing)
		return;

	if (count > CIRC_SPACE(cb->head, cb->tail, EV3_UART_BUFFER_SIZE))
		return;

	size = CIRC_SPACE_TO_END(cb->head, cb->tail, EV3_UART_BUFFER_SIZE);
	if (count > size) {
		memcpy(cb->buf + cb->head, cp, size);
		memcpy(cb->buf, cp + size, count - size);
		cb->head = count - size;
	} else {
		memcpy(cb->buf + cb->head, cp, count);
		/* mask so head wraps cleanly if the copy ends exactly at
		 * the physical end of the buffer
		 */
		cb->head = (cb->head + count) & (EV3_UART_BUFFER_SIZE - 1);
	}

	schedule_work(&port->rx_data_work);
}
Example #4
/********************************************************************************
 *  Description: user-space demo that exercises the Linux circ_buf macros with
 *               a simple produce/consume loop.
 *   Input Args: argc, argv - unused
 *  Output Args: none
 * Return Value: 0 on success, -1 if the ring buffer cannot be allocated
 ********************************************************************************/
int main (int argc, char **argv)
{
    int i, len;
    struct circ_buf        tx_ring; 
    char   data[LEN];
    char   buf[LEN];

    memset(&tx_ring, 0, sizeof(struct circ_buf)); 
    tx_ring.buf = malloc(CIRC_BUF_SIZE); 
    if( NULL == tx_ring.buf )
    {
        printf("Allocate Ring buffer failure.\n");
        return -1;
    }

    memset(data, 0, sizeof(data));
    /* Prepare for the data */
    for(i=0; i<sizeof(data); i++)
    {
        data[i] = 30+i;
    }

    printf("CIRC_SPACE: %d\n", CIRC_SPACE(tx_ring.head, tx_ring.tail, CIRC_BUF_SIZE));
    printf("CIRC_SPACE_TO_END: %d\n", CIRC_SPACE_TO_END(tx_ring.head, tx_ring.tail, CIRC_BUF_SIZE));
    printf("CIRC_CNT: %d\n", CIRC_CNT(tx_ring.head, tx_ring.tail, CIRC_BUF_SIZE));
    printf("CIRC_CNT_TO_END: %d\n", CIRC_CNT_TO_END(tx_ring.head, tx_ring.tail, CIRC_BUF_SIZE));
    while(1)
    {
       produce_item(&tx_ring, data, sizeof(data));
       len = consume_item(&tx_ring, buf, sizeof(buf) );
       sleep(1);
    }

    return 0;
} /* ----- End of main() ----- */
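For the freshly allocated, empty ring (head == tail == 0), the four printf() calls above report CIRC_SPACE = CIRC_SPACE_TO_END = CIRC_BUF_SIZE - 1 (one slot is always kept free to distinguish full from empty) and CIRC_CNT = CIRC_CNT_TO_END = 0.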
Example #5
/* -1 --- error, can't enqueue -- no space available */
static int jr_enqueue(uint32_t *desc_addr,
	       void (*callback)(uint32_t desc, uint32_t status, void *arg),
	       void *arg)
{
	struct jr_regs *regs = (struct jr_regs *)CONFIG_SYS_FSL_JR0_ADDR;
	int head = jr.head;
	dma_addr_t desc_phys_addr = virt_to_phys(desc_addr);

	if (sec_in32(&regs->irsa) == 0 ||
	    CIRC_SPACE(jr.head, jr.tail, jr.size) <= 0)
		return -1;

	jr.input_ring[head] = desc_phys_addr;
	jr.info[head].desc_phys_addr = desc_phys_addr;
	jr.info[head].desc_addr = (uint32_t)desc_addr;
	jr.info[head].callback = (void *)callback;
	jr.info[head].arg = arg;
	jr.info[head].op_done = 0;

	jr.head = (head + 1) & (jr.size - 1);

	sec_out32(&regs->irja, 1);

	return 0;
}
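Note the wrap at the end: jr.head = (head + 1) & (jr.size - 1). Like the CIRC_* macros themselves, this masking only works when jr.size is a power of two; with jr.size = 16, for example, head advances 14, 15, 0, 1, and so on.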
Example #6
File: pcie.c Project: Lyude/linux
static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv)
{
	if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
			priv->tx_bd_num)) {
		qtnf_pcie_data_tx_reclaim(priv);

		if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
				priv->tx_bd_num)) {
			pr_warn_ratelimited("reclaim full Tx queue\n");
			priv->tx_full_count++;
			return 0;
		}
	}

	return 1;
}
Example #7
static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;

	seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
	seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
	seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
	seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);

	seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
	seq_printf(s, "tx_bd_p_index(%u)\n",
		   readl(PCIE_HDP_RX0DMA_CNT(ps->pcie_reg_base))
			& (priv->tx_bd_num - 1));
	seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
	seq_printf(s, "tx queue len(%u)\n",
		   CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
			    priv->tx_bd_num));

	seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
	seq_printf(s, "rx_bd_p_index(%u)\n",
		   readl(PCIE_HDP_TX0DMA_CNT(ps->pcie_reg_base))
			& (priv->rx_bd_num - 1));
	seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
	seq_printf(s, "rx alloc queue len(%u)\n",
		   CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
			      priv->rx_bd_num));

	return 0;
}
Example #8
static void hisi_femac_rx_refill(struct hisi_femac_priv *priv)
{
	struct hisi_femac_queue *rxq = &priv->rxq;
	struct sk_buff *skb;
	u32 pos;
	u32 len = MAX_FRAME_SIZE;
	dma_addr_t addr;

	pos = rxq->head;
	while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
		if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
			break;
		if (unlikely(rxq->skb[pos])) {
			netdev_err(priv->ndev, "err skb[%d]=%p\n",
				   pos, rxq->skb[pos]);
			break;
		}
		skb = netdev_alloc_skb_ip_align(priv->ndev, len);
		if (unlikely(!skb))
			break;

		addr = dma_map_single(priv->dev, skb->data, len,
				      DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, addr)) {
			dev_kfree_skb_any(skb);
			break;
		}
		rxq->dma_phys[pos] = addr;
		rxq->skb[pos] = skb;
		writel(addr, priv->port_base + IQ_ADDR);
		pos = (pos + 1) % rxq->num;
	}
	rxq->head = pos;
}
Example #9
static int rs_write_room(struct tty_struct *tty)
{
	struct serial_state *info = tty->driver_data;

	if (serial_paranoia_check(info, tty->name, "rs_write_room"))
		return 0;
	return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
Example #10
/**
 * \brief Send ul_size bytes from p_buffer. This copies the data to one of the
 * GMAC Tx buffers, and then indicates to the GMAC that the buffer is ready.
 * The data is always treated as a complete frame, so it is queued for
 * transmission immediately.
 *
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param p_buffer   Pointer to the data buffer.
 * \param ul_size    Length of the frame.
 * \param func_tx_cb Transmit callback function.
 *
 * \return GMAC_OK if the frame was queued, GMAC_PARAM if ul_size exceeds
 * GMAC_TX_UNITSIZE, or GMAC_TX_BUSY if no transmit descriptor is free.
 */
uint32_t gmac_dev_write(gmac_device_t* p_gmac_dev, void *p_buffer,
		uint32_t ul_size, gmac_dev_tx_cb_t func_tx_cb)
{
	volatile gmac_tx_descriptor_t *p_tx_td;
	volatile gmac_dev_tx_cb_t *p_func_tx_cb;

	Gmac *p_hw = p_gmac_dev->p_hw;

	/* Check parameter */
	if (ul_size > GMAC_TX_UNITSIZE) {
		return GMAC_PARAM;
	}

	/* Pointers to the current transmit descriptor */
	p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->us_tx_head];

	/* If no free TxTd, buffer can't be sent, schedule the wakeup callback */
	if (CIRC_SPACE(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
					p_gmac_dev->us_tx_list_size) == 0) {
		/* p_tx_td already points at the head descriptor */
		if (p_tx_td->status.val & GMAC_TXD_USED)
			return GMAC_TX_BUSY;
	}

	/* Pointers to the current Tx callback */
	p_func_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_head];

	/* Set up/copy data to transmission buffer */
	if (p_buffer && ul_size) {
		/* Driver manages the ring buffer */
		memcpy((void *)p_tx_td->addr, p_buffer, ul_size);
	}

	/* Tx callback */
	*p_func_tx_cb = func_tx_cb;

	/* Update transmit descriptor status */

	/* The buffer size defined is the length of ethernet frame,
	   so it's always the last buffer of the frame. */
	if (p_gmac_dev->us_tx_head == p_gmac_dev->us_tx_list_size - 1) {
		p_tx_td->status.val =
				(ul_size & GMAC_TXD_LEN_MASK) | GMAC_TXD_LAST
				| GMAC_TXD_WRAP;
	} else {
		p_tx_td->status.val =
				(ul_size & GMAC_TXD_LEN_MASK) | GMAC_TXD_LAST;
	}

	circ_inc(&p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_list_size);

	/* Now start to transmit if it is still not done */
	gmac_start_transmission(p_hw);

	return GMAC_OK;
}
Example #11
//-----------------------------------------------------------------------------
/// Return current load of TX.
//-----------------------------------------------------------------------------
unsigned int EMAC_TxLoad(void)
{
    unsigned short head = txTd.head;
    unsigned short tail = txTd.tail;
    /* Note: (TX_BUFFERS - CIRC_SPACE(head, tail, TX_BUFFERS)) would be off
     * by one here: CIRC_SPACE always keeps one slot free, so
     * CIRC_CNT + CIRC_SPACE == TX_BUFFERS - 1. */
    return CIRC_CNT(head, tail, TX_BUFFERS);
}
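Example #12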
//////////////////////////////////////////////////////////////////////////////
// ASYNCHRONOUS
//////////////////////////////////////////////////////////////////////////////
static void kick_start_rx(void)
{
	if (usb_ref_count) {
		int total_space = CIRC_SPACE(rx_ring.in, rx_ring.out, RBUF_SIZE);

		if (total_space >= RX_PACKET_SIZE) {
			pxa_usb_recv(packet_buffer, RX_PACKET_SIZE,
				     rx_done_callback_packet_buffer);
		}
	}
}
Example #13
/*
 * Return the amount of space in the output buffer
 *
 * This is actually a contract between the driver and the tty layer outlining
 * how much write room the driver can guarantee will be sent OR BUFFERED.  This
 * driver MUST honor the return value.
 */
static int ehv_bc_tty_write_room(struct tty_struct *ttys)
{
	struct ehv_bc_data *bc = ttys->driver_data;
	unsigned long flags;
	int count;

	spin_lock_irqsave(&bc->lock, flags);
	count = CIRC_SPACE(bc->head, bc->tail, BUF_SIZE);
	spin_unlock_irqrestore(&bc->lock, flags);

	return count;
}
Example #14
static void user_notify_callback(void *event)
{
	down(&event_mutex);
	if (CIRC_SPACE(pmic_events.head, pmic_events.tail, CIRC_BUF_MAX)) {
		pmic_events.buf[pmic_events.head] = (int)event;
		pmic_events.head = (pmic_events.head + 1) & (CIRC_BUF_MAX - 1);
	} else {
		pr_info("Failed to notify event to the user\n");
	}
	up(&event_mutex);

	kill_fasync(&pmic_dev_queue, SIGIO, POLL_IN);
}
Example #15
void put_to_in(forth_context_type *fc, char c)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (CIRC_SPACE(fc->in.head, fc->in.tail, TIB_SIZE) == 0) {
		schedule_timeout(1);
		set_current_state(TASK_INTERRUPTIBLE);
		if (fc->stop) {
			/* don't leave the task in TASK_INTERRUPTIBLE */
			set_current_state(TASK_RUNNING);
			return;
		}
	}
	set_current_state(TASK_RUNNING);
	fc->in.buf[fc->in.head] = c;
	fc->in.head = (fc->in.head + 1) & (TIB_SIZE - 1);
}
Example #16
static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct hisi_femac_queue *txq = &priv->txq;
	dma_addr_t addr;
	u32 val;

	val = readl(priv->port_base + ADDRQ_STAT);
	val &= BIT_TX_READY;
	if (!val) {
		hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
		dev->stats.tx_dropped++;
		dev->stats.tx_fifo_errors++;
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
				 txq->num))) {
		hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
		dev->stats.tx_dropped++;
		dev->stats.tx_fifo_errors++;
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(priv->dev, skb->data,
			      skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, addr))) {
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	txq->dma_phys[txq->head] = addr;

	txq->skb[txq->head] = skb;
	txq->head = (txq->head + 1) % txq->num;

	writel(addr, priv->port_base + EQ_ADDR);
	writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);

	priv->tx_fifo_used_cnt++;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	netdev_sent_queue(dev, skb->len);

	return NETDEV_TX_OK;
}
Example #17
/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *        been assigned prior by caam_jr_register().
 * @desc: points to a job descriptor that executes our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
 *        where:
 *        @dev:    contains the job ring device that processed this
 *                 response.
 *        @desc:   descriptor that initiated the request, same as the
 *                 "desc" argument passed to caam_jr_enqueue().
 *        @status: untranslated status received from CAAM. See the
 *                 reference manual for a detailed description of
 *                 error meaning, or see the JRSTA definitions in the
 *                 register header file
 *        @areq:   optional pointer to an argument passed with the
 *                 original request
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
	tail = ACCESS_ONCE(jrp->tail);

	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_bh(&jrp->inplock);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;

	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_bh(&jrp->inplock);

	return 0;
}
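The kernel-doc above fully specifies the calling convention. A minimal, hypothetical caller (the jr_result/jr_done names are illustrative, not part of the CAAM driver, and jrdev is a job-ring device obtained beforehand) would submit a descriptor and block on a completion:

struct jr_result {
	struct completion completion;
	u32 status;
};

static void jr_done(struct device *dev, u32 *desc, u32 status, void *areq)
{
	struct jr_result *res = areq;

	res->status = status;		/* untranslated CAAM status */
	complete(&res->completion);
}

	/* in the submitter, with desc already built in DMAable memory: */
	struct jr_result res;

	init_completion(&res.completion);
	if (!caam_jr_enqueue(jrdev, desc, jr_done, &res))
		wait_for_completion(&res.completion);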
Example #18
static void rs_put_char(struct tty_struct *tty, unsigned char ch)
{
    struct async_struct *info;
    unsigned long flags;

    /* check tty before dereferencing it */
    if (!tty)
        return;

    info = (struct async_struct *)tty->driver_data;
    if (!info->xmit.buf)
        return;

    local_irq_save(flags);
    if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) {
        local_irq_restore(flags);
        return;
    }
    info->xmit.buf[info->xmit.head] = ch;
    info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1);
    local_irq_restore(flags);
}
Example #19
uint8_t *gmac_dev_get_tx_buffer(gmac_device_t* p_gmac_dev, gmac_quelist_t queue_idx)
{
	volatile gmac_tx_descriptor_t *p_tx_td;

	gmac_queue_t* p_gmac_queue = &p_gmac_dev->gmac_queue_list[queue_idx];
	/* Pointers to the current transmit descriptor */
	p_tx_td = &p_gmac_queue->p_tx_dscr[p_gmac_queue->us_tx_head];

	/* If no free TxTd, forget it */
	if (CIRC_SPACE(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail,
					p_gmac_queue->us_tx_list_size) == 0) {
		/* p_tx_td already points at the head descriptor */
		if (p_tx_td->status.val & GMAC_TXD_USED)
			return 0;
	}

	return (uint8_t *)p_tx_td->addr;
}
Example #20
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
	/* Checking if buffer is full. */
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}
Example #21
size_t
nu__AsyncIO__tx_enqueue(struct nu__AsyncIO *a, const void *src, size_t n, bool overrun)
{
    struct circ_buf *tx_buf = &(a->tx_buf);
    size_t ui;
    producer_enter(a, tx_buf);
    for (ui = 0; ui < n; ++ui) {
        tx_buf->buf[tx_buf->head] = *((const char *)src + ui);
        tx_buf->head = (tx_buf->head + 1) & (a->tx_buf_size - 1);
        if (CIRC_SPACE(tx_buf->head, tx_buf->tail, a->tx_buf_size) < 1) {
            if (!overrun)
                break;
            tx_buf->tail = (tx_buf->head - 1) & (a->tx_buf_size - 1);
        }
    }
    producer_exit(a, tx_buf);
    return ui;
}
Example #22
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	/* Check if buffer is full. */
	if (!CIRC_SPACE(ring->head, ring->tail,
			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
			NFP_FL_STATS_ELEM_RS + 1))
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}
Example #23
static int rs_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct serial_state *info = tty->driver_data;
	unsigned long flags;

	if (!info->xmit.buf)
		return 0;

	local_irq_save(flags);
	if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) {
		local_irq_restore(flags);
		return 0;
	}
	info->xmit.buf[info->xmit.head] = ch;
	info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1);
	local_irq_restore(flags);
	return 1;
}
Example #24
/**
 * drm_crtc_add_crc_entry - Add entry with CRC information for a frame
 * @crtc: CRTC to which the frame belongs
 * @has_frame: whether this entry has a frame number to go with
 * @frame: number of the frame these CRCs are about
 * @crcs: array of CRC values, with length matching #drm_crtc_crc.values_cnt
 *
 * For each frame, the driver polls the source of CRCs for new data and calls
 * this function to add them to the buffer from where userspace reads.
 */
int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
			   uint32_t frame, uint32_t *crcs)
{
	struct drm_crtc_crc *crc = &crtc->crc;
	struct drm_crtc_crc_entry *entry;
	int head, tail;

	spin_lock(&crc->lock);

	/* Caller may not have noticed yet that userspace has stopped reading */
	if (!crc->entries) {
		spin_unlock(&crc->lock);
		return -EINVAL;
	}

	head = crc->head;
	tail = crc->tail;

	if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
		bool was_overflow = crc->overflow;

		crc->overflow = true;
		spin_unlock(&crc->lock);

		if (!was_overflow)
			DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");

		return -ENOBUFS;
	}

	entry = &crc->entries[head];
	entry->frame = frame;
	entry->has_frame_counter = has_frame;
	memcpy(&entry->crcs, crcs, sizeof(*crcs) * crc->values_cnt);

	head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
	crc->head = head;

	spin_unlock(&crc->lock);

	wake_up_interruptible(&crc->wq);

	return 0;
}
Example #25
static int spi_write(struct bathos_pipe *pipe, const char *buf, int len)
{
	struct spi_data *data = &spi_data;
	int i;
	int s = CIRC_SPACE(data->cbuftx.head, data->cbuftx.tail,
		SPI_BUF_SIZE);
	int l = min(len, s);

	if (!l)
		return -EAGAIN;

	for (i = 0; i < l; i++) {
		data->buftx[data->cbuftx.head] = buf[i];
		data->cbuftx.head = (data->cbuftx.head + 1)
			& (SPI_BUF_SIZE - 1);
	}

	gpio_set(SPI_INTERRUPT_PIN, 1);

	return l;
}
Example #26
int  lego_pru_write_bytes(int port, unsigned char *pdata, int size)
{
  struct circ_buf *buf;
  int space, index;

  buf = &soft_uart->write_buf[port];

  // Save data into transmit ring buffer
  space = CIRC_SPACE(buf->head, buf->tail, BUFFER_SIZE);

  if (space < size) size = space;

  for(index = 0; index < size; index++)
  {
    buf->buf[buf->head] = pdata[index];
    buf->head = (buf->head + 1) & BUFFER_MASK;
  }

  pru_suart_start_tx(&soft_uart->port[port]);
  soft_uart->sensor_inited[port] = 1;
  return size;
}
Example #27
static unsigned _fifo_write(struct m_fifo *q, void *src,
			    unsigned count, copyfunc copy)
{
	unsigned n;
	unsigned head = *q->head;
	unsigned tail = *q->tail;
	unsigned size = q->size;

	if (CIRC_SPACE(head, tail, size) < count)
		return 0;

	n = CIRC_SPACE_TO_END(head, tail, size);

	if (likely(n >= count)) {
		copy(q->data + head, src, count);
	} else {
		copy(q->data + head, src, n);
		copy(q->data, src + n, count - n);
	}
	//*q->head = (head + count) & (size - 1);

	return count;
}
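For reference, the matching read side would use the CIRC_CNT/CIRC_CNT_TO_END pair and advance the tail instead. A sketch along the same lines (not part of the original driver):

static unsigned _fifo_read(struct m_fifo *q, void *dst,
			   unsigned count, copyfunc copy)
{
	unsigned n;
	unsigned head = *q->head;
	unsigned tail = *q->tail;
	unsigned size = q->size;

	/* refuse partial reads, mirroring _fifo_write */
	if (CIRC_CNT(head, tail, size) < count)
		return 0;

	n = CIRC_CNT_TO_END(head, tail, size);

	if (likely(n >= count)) {
		copy(dst, q->data + tail, count);
	} else {
		copy(dst, q->data + tail, n);
		copy(dst + n, q->data, count - n);
	}
	*q->tail = (tail + count) & (size - 1);

	return count;
}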
Example #28
int produce_item(struct circ_buf *ring, char *data, int  count)
{
    int len = 0;
    int left,i,size;
    int to_end_space=0;

    if ( (size=CIRC_SPACE(ring->head, ring->tail, CIRC_BUF_SIZE)) >= 1 )
    {
        left = len = count<=size ? count : size;
        to_end_space = CIRC_SPACE_TO_END(ring->head, ring->tail, CIRC_BUF_SIZE);

        if(left > to_end_space)
        { 
            memcpy(&(ring->buf[ring->head]), data, to_end_space); 
            for(i=0; i<to_end_space; i++) 
            { 
                printf("produec_item %02d bytes: ring->buf[%02d]=%d\n", to_end_space, ring->head+i, ring->buf[ring->head+i]); 
            }
            ring->head = (ring->head + to_end_space) & (CIRC_BUF_SIZE - 1); 
            left -= to_end_space; 
        }
        else
        { 
            to_end_space = 0;
        }

        memcpy(&(ring->buf[ring->head]), &data[to_end_space], left);
        for(i=0; i<left; i++)
        {
           printf("produec_item %02d bytes: ring->buf[%02d]=%d\n", left, ring->head+i, ring->buf[ring->head+i]);
        }
        ring->head = (ring->head + left) & (CIRC_BUF_SIZE - 1);
    }
    printf("-----------------------------------------------------------------------------------------------\n");

    return len;
}
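main() in Example #4 also calls consume_item(), which does not appear on this page. A sketch that mirrors produce_item() using the CIRC_CNT/CIRC_CNT_TO_END pair (an educated guess at the missing function, not a copy of the original):

int consume_item(struct circ_buf *ring, char *data, int count)
{
    int len, left;
    int to_end_cnt;

    /* bytes currently available in the ring */
    len = CIRC_CNT(ring->head, ring->tail, CIRC_BUF_SIZE);
    if (len == 0)
        return 0;

    left = len = count <= len ? count : len;
    to_end_cnt = CIRC_CNT_TO_END(ring->head, ring->tail, CIRC_BUF_SIZE);

    if (left > to_end_cnt)
    {
        /* wrap: first drain up to the physical end of the buffer */
        memcpy(data, &ring->buf[ring->tail], to_end_cnt);
        ring->tail = (ring->tail + to_end_cnt) & (CIRC_BUF_SIZE - 1);
        left -= to_end_cnt;
    }
    else
    {
        to_end_cnt = 0;
    }

    memcpy(&data[to_end_cnt], &ring->buf[ring->tail], left);
    ring->tail = (ring->tail + left) & (CIRC_BUF_SIZE - 1);

    return len;
}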
Example #29
/**
 * \brief GMAC Interrupt handler.
 *
 * \param p_gmac_dev   Pointer to GMAC device instance.
 */
void gmac_handler(gmac_device_t* p_gmac_dev)
{
	Gmac *p_hw = p_gmac_dev->p_hw;

	gmac_tx_descriptor_t *p_tx_td;
	gmac_dev_tx_cb_t *p_tx_cb;
	volatile uint32_t ul_isr;
	volatile uint32_t ul_rsr;
	volatile uint32_t ul_tsr;
	uint32_t ul_rx_status_flag;
	uint32_t ul_tx_status_flag;

	ul_isr = gmac_get_interrupt_status(p_hw);
	ul_rsr = gmac_get_rx_status(p_hw);
	ul_tsr = gmac_get_tx_status(p_hw);

	ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300);

	/* RX packet */
	if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & GMAC_RSR_REC)) {
		ul_rx_status_flag = GMAC_RSR_REC;

		/* Check OVR */
		if (ul_rsr & GMAC_RSR_RXOVR) {
			ul_rx_status_flag |= GMAC_RSR_RXOVR;
		}
		/* Check BNA */
		if (ul_rsr & GMAC_RSR_BNA) {
			ul_rx_status_flag |= GMAC_RSR_BNA;
		}
		/* Clear status */
		gmac_clear_rx_status(p_hw, ul_rx_status_flag);

		/* Invoke callbacks */
		if (p_gmac_dev->func_rx_cb) {
			p_gmac_dev->func_rx_cb(ul_rx_status_flag);
		}
	}

	/* TX packet */
	if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & GMAC_TSR_TXCOMP)) {

		ul_tx_status_flag = GMAC_TSR_TXCOMP;

		/* A frame transmitted */

		/* Check RLE */
		if (ul_tsr & GMAC_TSR_RLE) {
			/* Status RLE & Number of discarded buffers */
			ul_tx_status_flag = GMAC_TSR_RLE | CIRC_CNT(p_gmac_dev->us_tx_head,
					p_gmac_dev->us_tx_tail, p_gmac_dev->us_tx_list_size);
			p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_tail];
			gmac_reset_tx_mem(p_gmac_dev);
			gmac_enable_transmit(p_hw, 1);
		}
		/* Check COL */
		if (ul_tsr & GMAC_TSR_COL) {
			ul_tx_status_flag |= GMAC_TSR_COL;
		}
		/* Check UND */
		if (ul_tsr & GMAC_TSR_UND) {
			ul_tx_status_flag |= GMAC_TSR_UND;
		}
		/* Clear status */
		gmac_clear_tx_status(p_hw, ul_tx_status_flag);

		if (!CIRC_EMPTY(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail)) {
			/* Check the buffers */
			do {
				p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->us_tx_tail];
				p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_tail];
				/* Any error? Exit if buffer has not been sent yet */
				if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {
					break;
				}

				/* Notify upper layer that a packet has been sent */
				if (*p_tx_cb) {
					(*p_tx_cb) (ul_tx_status_flag);
				}

				circ_inc(&p_gmac_dev->us_tx_tail, p_gmac_dev->us_tx_list_size);
			} while (CIRC_CNT(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
							p_gmac_dev->us_tx_list_size));
		}

		if (ul_tsr & GMAC_TSR_RLE) {
			/* Notify upper layer RLE */
			if (*p_tx_cb) {
				(*p_tx_cb) (ul_tx_status_flag);
			}
		}

		/* If a wakeup has been scheduled, notify upper layer that it can
		   send other packets, and the sending will be successful. */
		if ((CIRC_SPACE(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
				p_gmac_dev->us_tx_list_size) >= p_gmac_dev->uc_wakeup_threshold)
				&& p_gmac_dev->func_wakeup_cb) {
			p_gmac_dev->func_wakeup_cb();
		}
	}
}
Example #30
static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
{
	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
	struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ps->base;
	struct net_device *ndev = NULL;
	struct sk_buff *skb = NULL;
	int processed = 0;
	struct qtnf_pearl_rx_bd *rxbd;
	dma_addr_t skb_paddr;
	int consume;
	u32 descw;
	u32 psize;
	u16 r_idx;
	u16 w_idx;
	int ret;

	while (processed < budget) {
		if (!qtnf_rx_data_ready(ps))
			goto rx_out;

		r_idx = priv->rx_bd_r_index;
		rxbd = &ps->rx_bd_vbase[r_idx];
		descw = le32_to_cpu(rxbd->info);

		skb = priv->rx_skb[r_idx];
		psize = QTN_GET_LEN(descw);
		consume = 1;

		if (!(descw & QTN_TXDONE_MASK)) {
			pr_warn("skip invalid rxbd[%d]\n", r_idx);
			consume = 0;
		}

		if (!skb) {
			pr_warn("skip missing rx_skb[%d]\n", r_idx);
			consume = 0;
		}

		if (skb && (skb_tailroom(skb) <  psize)) {
			pr_err("skip packet with invalid length: %u > %u\n",
			       psize, skb_tailroom(skb));
			consume = 0;
		}

		if (skb) {
			skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
						  le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
		}

		if (consume) {
			skb_put(skb, psize);
			ndev = qtnf_classify_skb(bus, skb);
			if (likely(ndev)) {
				qtnf_update_rx_stats(ndev, skb);
				skb->protocol = eth_type_trans(skb, ndev);
				napi_gro_receive(napi, skb);
			} else {
				pr_debug("drop untagged skb\n");
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		} else {
			if (skb) {
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		}

		priv->rx_skb[r_idx] = NULL;
		if (++r_idx >= priv->rx_bd_num)
			r_idx = 0;

		priv->rx_bd_r_index = r_idx;

		/* replace the processed buffer with a new one */
		w_idx = priv->rx_bd_w_index;
		while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
				  priv->rx_bd_num) > 0) {
			if (++w_idx >= priv->rx_bd_num)
				w_idx = 0;

			ret = pearl_skb2rbd_attach(ps, w_idx);
			if (ret) {
				pr_err("failed to allocate new rx_skb[%d]\n",
				       w_idx);
				break;
			}
		}

		processed++;
	}

rx_out:
	if (processed < budget) {
		napi_complete(napi);
		qtnf_en_rxdone_irq(ps);
	}

	return processed;
}