Example #1
void ring_push(ringBufS *pRing, int16_t pVal)
{
	/* Store the value at the head and advance the head index,
	 * wrapping around at the buffer size.
	 */
	pRing->buf[pRing->head] = pVal;
	pRing->head = MODULO_INC(pRing->head, pRing->size);

	if (pRing->count < pRing->size) {
		++pRing->count;
	} else {
		/* Buffer is full: overwrite the oldest entry by advancing
		 * the tail as well.
		 */
		pRing->tail = MODULO_INC(pRing->tail, pRing->size);
	}
}
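
The MODULO_INC macro itself is not shown in any of these examples, and the two codebases sampled here use it differently: Example #1 treats it as an expression that returns the wrapped index and assigns the result back, while Examples #2 through #9 (from Zephyr drivers) invoke it as a standalone statement that advances its lvalue argument in place. Below is a minimal sketch of both variants; the definitions are hypothetical, inferred purely from the call sites.

/* Expression form (Example #1): evaluates to the wrapped index, so the
 * caller assigns the result back. Hypothetical definition:
 */
#define MODULO_INC(index, size) (((index) + 1) % (size))

#undef MODULO_INC

/* Statement form (Examples #2-#9): advances the lvalue argument in place,
 * so it is invoked as a standalone statement, e.g.
 * MODULO_INC(rb->head, rb->len);. Hypothetical definition:
 */
#define MODULO_INC(val, max) \
	do { (val) = ((val) + 1 < (max)) ? ((val) + 1) : 0; } while (0)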
Example #2
/*
 * Reset TX queue when errors are detected
 */
static void tx_error_handler(Gmac *gmac, struct gmac_queue *queue)
{
	struct net_pkt *pkt;
	struct ring_buf *tx_frames = &queue->tx_frames;

	queue->err_tx_flushed_count++;

	/* Stop transmission, clean transmit pipeline and control registers */
	gmac->GMAC_NCR &= ~GMAC_NCR_TXEN;

	/* Free all pkt resources in the TX path */
	while (tx_frames->tail != tx_frames->head) {
		/* Release net buffer to the buffer pool */
		pkt = UINT_TO_POINTER(tx_frames->buf[tx_frames->tail]);
		net_pkt_unref(pkt);
		SYS_LOG_DBG("Dropping pkt %p", pkt);
		MODULO_INC(tx_frames->tail, tx_frames->len);
	}

	/* Reinitialize TX descriptor list */
	k_sem_reset(&queue->tx_desc_sem);
	tx_descriptors_init(gmac, queue);
	for (int i = 0; i < queue->tx_desc_list.len - 1; i++) {
		k_sem_give(&queue->tx_desc_sem);
	}

	/* Restart transmission */
	gmac->GMAC_NCR |= GMAC_NCR_TXEN;
}
Example #3
/*
 * Process successfully sent packets
 */
static void tx_completed(Gmac *gmac, struct gmac_queue *queue)
{
	struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
	struct gmac_desc *tx_desc;
	struct net_pkt *pkt;

	__ASSERT(tx_desc_list->buf[tx_desc_list->tail].w1 & GMAC_TXW1_USED,
		 "first buffer of a frame is not marked as own by GMAC");

	while (tx_desc_list->tail != tx_desc_list->head) {

		tx_desc = &tx_desc_list->buf[tx_desc_list->tail];
		MODULO_INC(tx_desc_list->tail, tx_desc_list->len);
		k_sem_give(&queue->tx_desc_sem);

		if (tx_desc->w1 & GMAC_TXW1_LASTBUFFER) {
			/* Release net buffer to the buffer pool */
			pkt = UINT_TO_POINTER(ring_buf_get(&queue->tx_frames));
			net_pkt_unref(pkt);
			SYS_LOG_DBG("Dropping pkt %p", pkt);

			break;
		}
	}
}
Example #4
/*
 * Put one 32 bit item into the ring buffer
 */
static void ring_buf_put(struct ring_buf *rb, u32_t val)
{
	rb->buf[rb->head] = val;
	MODULO_INC(rb->head, rb->len);

	__ASSERT(rb->tail != rb->head,
		 "ring buffer overflow");
}
Example #5
/*
 * Get one 32 bit item from the ring buffer
 */
static u32_t ring_buf_get(struct ring_buf *rb)
{
	u32_t val;

	__ASSERT(rb->tail != rb->head,
		 "retrieving data from empty ring buffer");

	val = rb->buf[rb->tail];
	MODULO_INC(rb->tail, rb->len);

	return val;
}
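
Taken together, ring_buf_put() and ring_buf_get() implement the classic one-slot-free convention: tail == head means empty, and a put that makes head catch up to tail has overflowed, which is why the overflow assert fires after the increment. The struct ring_buf layout below is a sketch inferred from these call sites (it is not shown in the source), using Zephyr's fixed-width typedefs.

/* Inferred layout for Examples #4 and #5: a ring of u32_t slots indexed
 * by head (next write) and tail (next read), wrapping at len. With len
 * slots, at most len - 1 items can be stored, since head == tail is
 * reserved to mean "empty".
 */
struct ring_buf {
	u32_t *buf;   /* backing storage */
	u16_t len;    /* number of slots */
	u16_t head;   /* next slot to write */
	u16_t tail;   /* next slot to read */
};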
Example #6
/*
 * Get data from the queue
 */
static int queue_get(struct ring_buf *rb, void **mem_block, size_t *size)
{
	unsigned int key;

	key = irq_lock();

	if (rb->tail == rb->head) {
		/* Ring buffer is empty */
		irq_unlock(key);
		return -ENOMEM;
	}

	*mem_block = rb->buf[rb->tail].mem_block;
	*size = rb->buf[rb->tail].size;
	MODULO_INC(rb->tail, rb->len);

	irq_unlock(key);

	return 0;
}
Example #7
/*
 * Put data in the queue
 */
static int queue_put(struct ring_buf *rb, void *mem_block, size_t size)
{
	u16_t head_next;
	unsigned int key;

	key = irq_lock();

	head_next = rb->head;
	MODULO_INC(head_next, rb->len);

	if (head_next == rb->tail) {
		/* Ring buffer is full */
		irq_unlock(key);
		return -ENOMEM;
	}

	rb->buf[rb->head].mem_block = mem_block;
	rb->buf[rb->head].size = size;
	rb->head = head_next;

	irq_unlock(key);

	return 0;
}
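
Unlike the assert-based pair above, queue_put() computes head_next first and rejects the write with -ENOMEM while the buffer is still intact, and both functions bracket the update with irq_lock()/irq_unlock() so they are safe against concurrent interrupt-context callers. Here the ring element is a record rather than a bare u32_t; a plausible layout inferred from the field accesses (again, not shown in the source):

/* Inferred types for Examples #6 and #7: each slot carries a memory-block
 * pointer and its size, so this ring evidently comes from a different
 * driver than the u32_t ring used by Examples #4 and #5.
 */
struct queue_item {
	void *mem_block;  /* block owned by the producer until dequeued */
	size_t size;      /* number of valid bytes in mem_block */
};

struct ring_buf {
	struct queue_item *buf;  /* slot storage */
	u16_t len;               /* number of slots */
	u16_t head;              /* next slot to write */
	u16_t tail;              /* next slot to read */
};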
Example #8
static int eth_tx(struct net_if *iface, struct net_pkt *pkt)
{
	struct device *const dev = net_if_get_device(iface);
	const struct eth_sam_dev_cfg *const cfg = DEV_CFG(dev);
	struct eth_sam_dev_data *const dev_data = DEV_DATA(dev);
	Gmac *gmac = cfg->regs;
	struct gmac_queue *queue = &dev_data->queue_list[0];
	struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
	struct gmac_desc *tx_desc;
	struct net_buf *frag;
	u8_t *frag_data;
	u16_t frag_len;
	u32_t err_tx_flushed_count_at_entry = queue->err_tx_flushed_count;
	unsigned int key;

	__ASSERT(pkt, "buf pointer is NULL");
	__ASSERT(pkt->frags, "Frame data missing");

	SYS_LOG_DBG("ETH tx");

	/* First fragment is special - it contains link layer (Ethernet
	 * in our case) header. Modify the data pointer to account for more data
	 * in the beginning of the buffer.
	 */
	net_buf_push(pkt->frags, net_pkt_ll_reserve(pkt));

	frag = pkt->frags;
	while (frag) {
		frag_data = frag->data;
		frag_len = frag->len;

		/* Assure cache coherency before DMA read operation */
		DCACHE_CLEAN(frag_data, frag_len);

		k_sem_take(&queue->tx_desc_sem, K_FOREVER);

		/* The following section becomes critical and requires IRQ lock
		 * / unlock protection only due to the possibility of executing
		 * tx_error_handler() function.
		 */
		key = irq_lock();

		/* Check if tx_error_handler() function was executed */
		if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
			irq_unlock(key);
			return -EIO;
		}

		tx_desc = &tx_desc_list->buf[tx_desc_list->head];

		/* Update buffer descriptor address word */
		tx_desc->w0 = (u32_t)frag_data;
		/* Guarantee that address word is written before the status
		 * word to avoid race condition.
		 */
		__DMB();  /* data memory barrier */
		/* Update buffer descriptor status word (clear used bit) */
		tx_desc->w1 =
			  (frag_len & GMAC_TXW1_LEN)
			| (!frag->frags ? GMAC_TXW1_LASTBUFFER : 0)
			| (tx_desc_list->head == tx_desc_list->len - 1
			   ? GMAC_TXW1_WRAP : 0);

		/* Update descriptor position */
		MODULO_INC(tx_desc_list->head, tx_desc_list->len);

		__ASSERT(tx_desc_list->head != tx_desc_list->tail,
			 "tx_desc_list overflow");

		irq_unlock(key);

		/* Continue with the rest of fragments (only data) */
		frag = frag->frags;
	}

	key = irq_lock();

	/* Check if tx_error_handler() function was executed */
	if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
		irq_unlock(key);
		return -EIO;
	}

	/* Ensure the descriptor following the last one is marked as used */
	tx_desc = &tx_desc_list->buf[tx_desc_list->head];
	tx_desc->w1 |= GMAC_TXW1_USED;

	/* Account for a sent frame */
	ring_buf_put(&queue->tx_frames, POINTER_TO_UINT(pkt));

	irq_unlock(key);

	/* Start transmission */
	gmac->GMAC_NCR |= GMAC_NCR_TSTART;

	return 0;
}
Example #9
static struct net_pkt *frame_get(struct gmac_queue *queue)
{
	struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list;
	struct gmac_desc *rx_desc;
	struct ring_buf *rx_frag_list = &queue->rx_frag_list;
	struct net_pkt *rx_frame;
	bool frame_is_complete;
	struct net_buf *frag;
	struct net_buf *new_frag;
	struct net_buf *last_frag = NULL;
	u8_t *frag_data;
	u32_t frag_len;
	u32_t frame_len = 0;
	u16_t tail;

	/* Check if there exists a complete frame in RX descriptor list */
	tail = rx_desc_list->tail;
	rx_desc = &rx_desc_list->buf[tail];
	frame_is_complete = false;
	while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP) && !frame_is_complete) {
		frame_is_complete = (bool)(rx_desc->w1 & GMAC_RXW1_EOF);
		MODULO_INC(tail, rx_desc_list->len);
		rx_desc = &rx_desc_list->buf[tail];
	}
	/* Frame which is not complete can be dropped by GMAC. Do not process
	 * it, even partially.
	 */
	if (!frame_is_complete) {
		return NULL;
	}

	rx_frame = net_pkt_get_reserve_rx(0, K_NO_WAIT);

	/* Process a frame */
	tail = rx_desc_list->tail;
	rx_desc = &rx_desc_list->buf[tail];
	frame_is_complete = false;

	/* TODO: Don't assume first RX fragment will have SOF (Start of frame)
	 * bit set. If SOF bit is missing recover gracefully by dropping
	 * invalid frame.
	 */
	__ASSERT(rx_desc->w1 & GMAC_RXW1_SOF,
		 "First RX fragment is missing SOF bit");

	/* TODO: We know already tail and head indexes of fragments containing
	 * complete frame. Loop over those indexes, don't search for them
	 * again.
	 */
	while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP) && !frame_is_complete) {
		frag = (struct net_buf *)rx_frag_list->buf[tail];
		frag_data = (u8_t *)(rx_desc->w0 & GMAC_RXW0_ADDR);
		__ASSERT(frag->data == frag_data,
			 "RX descriptor and buffer list desynchronized");
		frame_is_complete = (bool)(rx_desc->w1 & GMAC_RXW1_EOF);
		if (frame_is_complete) {
			frag_len = (rx_desc->w1 & GMAC_RXW1_LEN) - frame_len;
		} else {
			frag_len = CONFIG_NET_BUF_DATA_SIZE;
		}

		frame_len += frag_len;

		/* Link frame fragments only if RX net buffer is valid */
		if (rx_frame != NULL) {
			/* Assure cache coherency after DMA write operation */
			DCACHE_INVALIDATE(frag_data, frag_len);

			/* Get a new data net buffer from the buffer pool */
			new_frag = net_pkt_get_frag(rx_frame, K_NO_WAIT);
			if (new_frag == NULL) {
				queue->err_rx_frames_dropped++;
				net_pkt_unref(rx_frame);
				rx_frame = NULL;
			} else {
				net_buf_add(frag, frag_len);
				if (!last_frag) {
					net_pkt_frag_insert(rx_frame, frag);
				} else {
					net_buf_frag_insert(last_frag, frag);
				}
				last_frag = frag;
				frag = new_frag;
				rx_frag_list->buf[tail] = (u32_t)frag;
			}
		}

		/* Update buffer descriptor status word */
		rx_desc->w1 = 0;
		/* Guarantee that status word is written before the address
		 * word to avoid race condition.
		 */
		__DMB();  /* data memory barrier */
		/* Update buffer descriptor address word */
		rx_desc->w0 =
			  ((u32_t)frag->data & GMAC_RXW0_ADDR)
			| (tail == rx_desc_list->len - 1 ? GMAC_RXW0_WRAP : 0);

		MODULO_INC(tail, rx_desc_list->len);
		rx_desc = &rx_desc_list->buf[tail];
	}

	rx_desc_list->tail = tail;
	SYS_LOG_DBG("Frame complete: rx=%p, tail=%d", rx_frame, tail);
	__ASSERT_NO_MSG(frame_is_complete);

	return rx_frame;
}