/* * Buffer descriptor */ void smap_desc_init(struct smap_softc *sc) { struct smap_desc *d; int i; sc->tx_desc = (void *)SMAP_TXDESC_BASE; sc->rx_desc = (void *)SMAP_RXDESC_BASE; sc->tx_buf_freesize = SMAP_TXBUF_SIZE; sc->tx_fifo_ptr = 0; sc->tx_start_index = 0; sc->tx_done_index = 0; sc->rx_done_index = 0; /* intialize entry */ d = sc->tx_desc; for (i = 0; i < SMAP_DESC_MAX; i++, d++) { d->stat = 0; d->__reserved = 0; d->sz = 0; d->ptr = 0; } d = sc->rx_desc; for (i = 0; i < SMAP_DESC_MAX; i++, d++) { d->stat = SMAP_RXDESC_EMPTY; d->__reserved = 0; d->sz = 0; d->ptr = 0; } _wbflush(); }
void smap_txeof(void *arg) { struct smap_softc *sc = arg; struct ifnet *ifp = &sc->ethercom.ec_if; struct smap_desc *d; int i; FUNC_ENTER(); /* clear the timeout timer. */ ifp->if_timer = 0; /* garbage collect */ for (i = sc->tx_done_index;; i = (i + 1) & 0x3f) { u_int16_t stat; d = &sc->tx_desc[i]; stat = d->stat; if (stat & SMAP_TXDESC_READY) { /* all descriptor processed. */ break; } else if (stat & 0x7fff) { if (stat & (SMAP_TXDESC_ECOLL | SMAP_TXDESC_LCOLL | SMAP_TXDESC_MCOLL | SMAP_TXDESC_SCOLL)) ifp->if_collisions++; else ifp->if_oerrors++; } else { ifp->if_opackets++; } if (sc->tx_desc_cnt == 0) break; sc->tx_buf_freesize += ROUND4(d->sz); sc->tx_desc_cnt--; d->sz = 0; d->ptr = 0; d->stat = 0; _wbflush(); } sc->tx_done_index = i; /* OK to start transmit */ ifp->if_flags &= ~IFF_OACTIVE; FUNC_EXIT(); }
/*
 * idedrv_outs:
 *	Stream "len" bytes from "buf" to the IDE register at "reg"
 *	(relative to the dispatch base address) as 16-bit writes,
 *	assembling each word according to build-time byte order and
 *	flushing the write buffer after each store.
 *	NOTE(review): assumes "len" is even; an odd length would read one
 *	byte past the end of "buf" -- confirm with callers.
 */
static void
idedrv_outs(idecommon_dispatch_t *disp, uint32_t reg, uint8_t *buf, int len)
{
	volatile uint16_t *port;
	uint16_t word;

	/* the target register address is loop-invariant */
	port = (volatile uint16_t *) PHYS_TO_K1(reg + disp->baseaddr);

	while (len > 0) {
#ifdef _BYTESWAP_
		word = ((uint16_t) buf[0] << 8) + (uint16_t) buf[1];
#else
		word = ((uint16_t) buf[1] << 8) + (uint16_t) buf[0];
#endif
		*port = word;
		_wbflush();
		buf += 2;
		len -= 2;
	}
}
/*
 * smap_start:
 *	ifnet if_start handler.  Drain the send queue: for each packet,
 *	copy the mbuf chain into the softc staging buffer, stream it into
 *	the TX FIFO as 32-bit words, then hand it to the EMAC3 through a
 *	TX descriptor.  Sets IFF_OACTIVE and stops when FIFO space or
 *	descriptors run out (cleared again by smap_txeof()).
 */
void
smap_start(struct ifnet *ifp)
{
	struct smap_softc *sc = ifp->if_softc;
	struct smap_desc *d;
	struct mbuf *m0, *m;
	u_int8_t *p, *q;
	u_int32_t *r;
	int i, sz, pktsz;
	u_int16_t fifop;
	u_int16_t r16;

	KDASSERT(ifp->if_flags & IFF_RUNNING);
	FUNC_ENTER();

	while (1) {
		/* peek first; only dequeue once we know it can be sent */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			goto end;

		pktsz = m0->m_pkthdr.len;
		KDASSERT(pktsz <= ETHER_MAX_LEN - ETHER_CRC_LEN);
		/* the FIFO is filled in 4-byte units */
		sz = ROUND4(pktsz);
		if (sz > sc->tx_buf_freesize ||
		    sc->tx_desc_cnt >= SMAP_DESC_MAX ||
		    emac3_tx_done() != 0) {
			/* no room; stall output until smap_txeof() runs */
			ifp->if_flags |= IFF_OACTIVE;
			goto end;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		KDASSERT(m0 != NULL);
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);

		p = (u_int8_t *)sc->tx_buf;
		q = p + sz;

		/* copy to temporary buffer area */
		for (m = m0; m != 0; m = m->m_next) {
			memcpy(p, mtod(m, void *), m->m_len);
			p += m->m_len;
		}
		m_freem(m0);

		/* zero padding area */
		for (; p < q; p++)
			*p = 0;

		/* put to FIFO */
		fifop = sc->tx_fifo_ptr;
		KDASSERT((fifop & 3) == 0);
		_reg_write_2(SMAP_TXFIFO_PTR_REG16, fifop);
		/* advance the software FIFO pointer, wrapping at 4 KB */
		sc->tx_fifo_ptr = (fifop + sz) & 0xfff;

		r = sc->tx_buf;
		for (i = 0; i < sz; i += sizeof(u_int32_t))
			*(volatile u_int32_t *)SMAP_TXFIFO_DATA_REG = *r++;
		_wbflush();

		/* put FIFO to EMAC3 */
		d = &sc->tx_desc[sc->tx_start_index];
		KDASSERT((d->stat & SMAP_TXDESC_READY) == 0);
		d->sz = pktsz;
		d->ptr = fifop + SMAP_TXBUF_BASE;
		/* READY last: hands the descriptor to the hardware */
		d->stat = SMAP_TXDESC_READY | SMAP_TXDESC_GENFCS |
		    SMAP_TXDESC_GENPAD;
		_wbflush();

		sc->tx_buf_freesize -= sz;
		sc->tx_desc_cnt++;
		/* 64-entry ring */
		sc->tx_start_index = (sc->tx_start_index + 1) & 0x3f;
		_reg_write_1(SMAP_TXFIFO_FRAME_INC_REG8, 1);

		emac3_tx_kick();
		/* enable the TX-done interrupt if it is not already on */
		r16 = _reg_read_2(SPD_INTR_ENABLE_REG16);
		if ((r16 & SPD_INTR_TXDNV) == 0) {
			r16 |= SPD_INTR_TXDNV;
			_reg_write_2(SPD_INTR_ENABLE_REG16, r16);
		}
	}
 end:
	/* set watchdog timer */
	ifp->if_timer = 5;

	FUNC_EXIT();
}
/*
 * smap_rxeof:
 *	Receive-completion handler.  Walk the RX descriptor ring from
 *	rx_done_index, copy each received frame out of the RX FIFO into
 *	a fresh mbuf, and pass it up through if_input.  Each descriptor
 *	is returned to the hardware by re-marking it EMPTY.
 */
void
smap_rxeof(void *arg)
{
	struct smap_softc *sc = arg;
	struct smap_desc *d;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m;
	u_int16_t r16, stat;
	u_int32_t *p;
	int i, j, sz, rxsz, cnt;

	FUNC_ENTER();

	i = sc->rx_done_index;
	/* 64-entry ring; cnt counts descriptors consumed this pass */
	for (cnt = 0;; cnt++, i = (i + 1) & 0x3f) {
		m = NULL;
		d = &sc->rx_desc[i];
		stat = d->stat;

		if ((stat & SMAP_RXDESC_EMPTY) != 0) {
			/* still owned by hardware; ring is drained */
			break;
		} else if (stat & 0x7fff) {
			/* any error bit set: drop the frame */
			ifp->if_ierrors++;
			goto next_packet;
		}

		sz = d->sz;
		/* the FIFO is read in 4-byte units */
		rxsz = ROUND4(sz);
		KDASSERT(sz >= ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN);
		KDASSERT(sz <= ETHER_MAX_LEN);

		/* load data from FIFO */
		_reg_write_2(SMAP_RXFIFO_PTR_REG16, d->ptr & 0x3ffc);
		p = sc->rx_buf;
		for (j = 0; j < rxsz; j += sizeof(u_int32_t)) {
			*p++ = _reg_read_4(SMAP_RXFIFO_DATA_REG);
		}

		/* put to mbuf */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: unable to allocate Rx mbuf\n", DEVNAME);
			ifp->if_ierrors++;
			goto next_packet;
		}

		if (sz > (MHLEN - 2)) {
			/* frame won't fit in the header mbuf; get a cluster */
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: unable to allocate Rx cluster\n",
				    DEVNAME);
				m_freem(m);
				m = NULL;
				ifp->if_ierrors++;
				goto next_packet;
			}
		}

		m->m_data += 2; /* for alignment */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = sz;
		memcpy(mtod(m, void *), (void *)sc->rx_buf, sz);

	next_packet:
		/*
		 * NOTE(review): this label is reached on the error paths
		 * too, so if_ipackets also counts dropped frames --
		 * confirm whether that is intended.
		 */
		ifp->if_ipackets++;
		_reg_write_1(SMAP_RXFIFO_FRAME_DEC_REG8, 1);

		/* free descriptor: hand it back to the hardware */
		d->sz = 0;
		d->ptr = 0;
		d->stat = SMAP_RXDESC_EMPTY;
		_wbflush();

		if (m != NULL) {
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
			(*ifp->if_input)(ifp, m);
		}
	}
	sc->rx_done_index = i;

	/* enable the RX descriptor interrupt if we consumed anything */
	r16 = _reg_read_2(SPD_INTR_ENABLE_REG16);
	if (((r16 & SPD_INTR_RXDNV) == 0) && cnt > 0) {
		r16 |= SPD_INTR_RXDNV;
		_reg_write_2(SPD_INTR_ENABLE_REG16, r16);
	}
	FUNC_EXIT();
}
/*
 * idedrv_outw:
 *	Write one 16-bit value to the IDE register at "reg", relative to
 *	the dispatch base address, then flush the write buffer.
 */
static void
idedrv_outw(idecommon_dispatch_t *disp, uint32_t reg, uint16_t val)
{
	volatile uint16_t *port;

	port = (volatile uint16_t *) PHYS_TO_K1(reg + disp->baseaddr);
	*port = val;
	_wbflush();
}