/** Transmit packet * * @v netdev Network device * @v iobuf I/O buffer * @ret rc Return status code */ static int b44_transmit(struct net_device *netdev, struct io_buffer *iobuf) { struct b44_private *bp = netdev_priv(netdev); u32 cur = bp->tx_cur; u32 ctrl; /* Check for TX ring overflow */ if (bp->tx[cur].ctrl) { DBG("tx overflow\n"); return -ENOBUFS; } /* Will call netdev_tx_complete() on the iobuf later */ bp->tx_iobuf[cur] = iobuf; /* Set up TX descriptor */ ctrl = (iob_len(iobuf) & DESC_CTRL_LEN) | DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF; if (cur == B44_RING_LAST) ctrl |= DESC_CTRL_EOT; bp->tx[cur].ctrl = cpu_to_le32(ctrl); bp->tx[cur].addr = cpu_to_le32(VIRT_TO_B44(iobuf->data)); /* Update next available descriptor index */ cur = ring_next(cur); bp->tx_cur = cur; wmb(); /* Tell card that a new TX descriptor is ready */ bw32(bp, B44_DMATX_PTR, cur * sizeof(struct dma_desc)); return 0; }
/** Print the next pending entry of a ring buffer
 *
 * @v ring	Ring buffer
 * @v name	Human-readable name used in the output
 *
 * Reads ring_next_size() exactly once: the original called it three
 * times, which is redundant and could yield inconsistent values if the
 * ring is mutated concurrently between calls.
 */
void ring_dump(ringbuffer_t *ring, const char *name)
{
	/* Signed: a negative size means the ring holds no entry */
	int size = ring_next_size(ring);

	if (size < 0) {
		printf("Ring %s is empty\n", name);
		return;
	}

	/* NOTE(review): assumes ring_next() data is printable text of
	 * exactly `size` bytes — TODO confirm against producer */
	printf("Data in %s: %d %.*s\n", name, size, size,
	       (char *) ring_next(ring));
}
static void b44_process_rx_packets(struct b44_private *bp) { struct io_buffer *iob; /* received data */ struct rx_header *rh; u32 pending, i; u16 len; pending = pending_rx_index(bp); for (i = bp->rx_cur; i != pending; i = ring_next(i)) { iob = bp->rx_iobuf[i]; if (iob == NULL) break; rh = iob->data; len = le16_to_cpu(rh->len); /* * Guard against incompletely written RX descriptors. * Without this, things can get really slow! */ if (len == 0) break; /* Discard CRC that is generated by the card */ len -= 4; /* Check for invalid packets and errors */ if (len > RX_PKT_BUF_SZ - RX_PKT_OFFSET || (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) { DBG("rx error len=%d flags=%04x\n", len, cpu_to_le16(rh->flags)); rh->len = 0; rh->flags = 0; netdev_rx_err(bp->netdev, iob, -EINVAL); continue; } /* Clear RX descriptor */ rh->len = 0; rh->flags = 0; bp->rx_iobuf[i] = NULL; /* Hand off the IO buffer to the network stack */ iob_reserve(iob, RX_PKT_OFFSET); iob_put(iob, len); netdev_rx(bp->netdev, iob); } bp->rx_cur = i; b44_rx_refill(bp, pending_rx_index(bp)); }
/** Recycles sent TX descriptors and notifies network stack
 *
 * @v bp	Driver state
 */
static void b44_tx_complete(struct b44_private *bp)
{
	u32 pending = pending_tx_index(bp);
	u32 idx;

	for (idx = bp->tx_dirty; idx != pending; idx = ring_next(idx)) {
		/* Report the finished frame to the network stack */
		netdev_tx_complete(bp->netdev, bp->tx_iobuf[idx]);
		bp->tx_iobuf[idx] = NULL;

		/* Release the descriptor for reuse */
		bp->tx[idx].ctrl = 0;
		bp->tx[idx].addr = 0;
	}

	bp->tx_dirty = pending;
}
/*
 * Refill RX ring descriptors with buffers. This is needed
 * because during rx we are passing ownership of descriptor
 * buffers to the network stack.
 *
 * @v bp	Driver state
 * @v pending	Index currently owned by the hardware; refilling starts
 *		at the slot after it
 *
 * Fix: the loop previously started at `pending + 1`, which does not
 * wrap — when pending == B44_RING_LAST the index ran one past the end
 * of the ring arrays.  ring_next() performs the wrap correctly.
 */
static void b44_rx_refill(struct b44_private *bp, u32 pending)
{
	u32 i;

	/* Skip the pending slot itself */
	for (i = ring_next(pending); i != bp->rx_cur; i = ring_next(i)) {
		if (bp->rx_iobuf[i] != NULL)
			continue;

		bp->rx_iobuf[i] = alloc_iob(RX_PKT_BUF_SZ);
		if (!bp->rx_iobuf[i]) {
			DBG("Refill rx ring failed!!\n");
			break;
		}

		b44_populate_rx_descriptor(bp, i);
	}
}
/** Periodic display multiplexing work
 *
 * Advances the digit multiplex index on each expiry of the mux timer
 * and switches the seven-segment output from the current digit to the
 * next one.  Does nothing while the display is disabled or the timer
 * has not yet expired.
 */
void display_work(void)
{
	uint8_t active, upcoming;

	if (!display.enabled)
		return;

	if (!timer_expired(&display.mux_timer))
		return;

	/* Re-arm the timer for the next multiplex period */
	timer_add(&display.mux_timer, DISPLAY_MUX_PERIOD_MS);

	active = display.digit_mux_count;
	upcoming = ring_next(active, DISPLAY_NR_DIGITS - 1u);
	display.digit_mux_count = upcoming;

	sseg_multiplex(&display.digit_data[active].sseg,
		       &display.digit_data[upcoming].sseg);
}