static void resetHardware(struct bfin_ethernetSoftc *sc) {
  void *ethBase;
  void *rxdmaBase;
  void *txdmaBase;

  ethBase = sc->ethBase;
  rxdmaBase = sc->rxdmaBase;
  txdmaBase = sc->txdmaBase;
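  /* disable the MAC, then stop the receive and transmit DMA channels */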
  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = 0;
}
void bfin_ethernet_txdma_isr(int vector) {
  struct bfin_ethernetSoftc *sc;
  void *txdmaBase;
  uint16_t status;
  int i;

  for (i = 0; i < N_BFIN_ETHERNET; i++) {
    sc = &ethernetSoftc[i];
    txdmaBase = sc->txdmaBase;
    status = BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET);
    if (status & DMA_IRQ_STATUS_DMA_DONE)
      rtems_event_send(sc->txDaemonTid, INTERRUPT_EVENT);
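    /* writing the handled status bits back clears them (write-1-to-clear) */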
    BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) = status;
  }
}
Example 3
File: twi.c, Project: RTEMS/rtems
void bfin_twi_isr(int source) {
  void *base;
  int i;
  uint16_t r;
  uint16_t stat;

  for (i = 0; i < N_BFIN_TWI; i++) {
    base = twi[i].base;
    if (base) {
      stat = BFIN_REG16(base, TWI_INT_STAT_OFFSET);
      if (stat) {
        BFIN_REG16(base, TWI_INT_STAT_OFFSET) = stat;
        if ((stat & TWI_INT_STAT_SINIT) && !twi[i].slaveActive) {
          twi[i].slaveActive = true;
          r = BFIN_REG16(base, TWI_FIFO_CTL_OFFSET);
          BFIN_REG16(base, TWI_FIFO_CTL_OFFSET) = r | TWI_FIFO_CTL_XMTFLUSH;
          BFIN_REG16(base, TWI_FIFO_CTL_OFFSET) = r;
          r = BFIN_REG16(base, TWI_SLAVE_CTL_OFFSET);
          BFIN_REG16(base, TWI_SLAVE_CTL_OFFSET) = r | TWI_SLAVE_CTL_STDVAL;
        }
        if (twi[i].slaveActive) {
          /* a slave mode transfer is in progress */
          if (stat & (TWI_INT_STAT_SCOMP | TWI_INT_STAT_SERR)) {
            /* slave transfer completed or failed: clear STDVAL to
               disable slave mode until the next slave transfer begins */
            r = BFIN_REG16(base, TWI_SLAVE_CTL_OFFSET);
            BFIN_REG16(base, TWI_SLAVE_CTL_OFFSET) = r & ~TWI_SLAVE_CTL_STDVAL;
            twi[i].slaveActive = false;
          }
        }
        if (twi[i].masterActive && !twi[i].slaveActive) {
          /* a master mode transfer is in progress */
          if (stat & (TWI_INT_STAT_MCOMP | TWI_INT_STAT_MERR)) {
            /* completed or failed: in either case wake the task blocked
               in bfin_twi_request(), which inspects the result */
            rtems_semaphore_release(twi[i].irqSem);
          }
        }
      }
    }
  }
}
Example 4
File: twi.c, Project: RTEMS/rtems
rtems_status_code bfin_twi_register_callback(int channel,
                                             bfin_twi_callback_t callback,
                                             void *arg) {
  void *base;
  rtems_interrupt_level level;

  if (channel < 0 || channel >= N_BFIN_TWI)
    return RTEMS_INVALID_NUMBER;

  base = twi[channel].base;
  if (callback == NULL)
    BFIN_REG16(base, TWI_SLAVE_CTL_OFFSET) = 0;
  rtems_interrupt_disable(level);
  twi[channel].callback = callback;
  twi[channel].callbackArg = arg;
  rtems_interrupt_enable(level);
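  /* with a callback installed, enable slave mode (SEN) and reception
     of the general call address (GEN) */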
  if (callback != NULL)
    BFIN_REG16(base, TWI_SLAVE_CTL_OFFSET) = TWI_SLAVE_CTL_GEN |
                                             TWI_SLAVE_CTL_SEN;

  return RTEMS_SUCCESSFUL;
}
Example 5
File: twi.c, Project: RTEMS/rtems
rtems_status_code bfin_twi_init(int channel, bfin_twi_config_t *config) {
  rtems_status_code result;
  void *base;

  if (channel < 0 || channel >= N_BFIN_TWI)
    return RTEMS_INVALID_NUMBER;

  base = config->base;
  twi[channel].base = base;

  result = rtems_semaphore_create(rtems_build_name('t','w','i','s'),
                                  0,
                                  RTEMS_FIFO |
                                  RTEMS_SIMPLE_BINARY_SEMAPHORE |
                                  RTEMS_NO_INHERIT_PRIORITY |
                                  RTEMS_NO_PRIORITY_CEILING |
                                  RTEMS_LOCAL,
                                  0,
                                  &twi[channel].irqSem);
  if (result != RTEMS_SUCCESSFUL)
    return result;
  result = rtems_semaphore_create(rtems_build_name('t','w','i','m'),
                                  1,
                                  RTEMS_PRIORITY |
                                  RTEMS_SIMPLE_BINARY_SEMAPHORE |
                                  RTEMS_INHERIT_PRIORITY |
                                  RTEMS_NO_PRIORITY_CEILING |
                                  RTEMS_LOCAL,
                                  0,
                                  &twi[channel].mutex);
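  /* prescale SCLK down to the TWI's internal time reference
     (nominally 10 MHz): prescale = ceil(sclk / 10 MHz) */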
  BFIN_REG16(base, TWI_CONTROL_OFFSET) =
      (uint16_t) (((config->sclk + 9999999) / 10000000) <<
                  TWI_CONTROL_PRESCALE_SHIFT) |
      TWI_CONTROL_TWI_ENA;
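  /* SCL high/low counts are in periods of that ~10 MHz reference:
     8 + 17 = 25 periods gives ~400 kHz (fast mode),
     33 + 67 = 100 periods gives 100 kHz (standard mode) */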
  BFIN_REG16(base, TWI_CLKDIV_OFFSET) = config->fast ?
                                        ((8 << TWI_CLKDIV_CLKHI_SHIFT) |
                                         (17 << TWI_CLKDIV_CLKLOW_SHIFT)) :
                                        ((33 << TWI_CLKDIV_CLKHI_SHIFT) |
                                         (67 << TWI_CLKDIV_CLKLOW_SHIFT));
  BFIN_REG16(base, TWI_SLAVE_CTL_OFFSET) = 0;
  BFIN_REG16(base, TWI_MASTER_CTL_OFFSET) = config->fast ?
                                            TWI_MASTER_CTL_FAST :
                                            0;
  BFIN_REG16(base, TWI_SLAVE_ADDR_OFFSET) = (uint16_t) config->slave_address <<
                                            TWI_SLAVE_ADDR_SADDR_SHIFT;
  BFIN_REG16(base, TWI_MASTER_STAT_OFFSET) = TWI_MASTER_STAT_BUFWRERR |
                                             TWI_MASTER_STAT_BUFRDERR |
                                             TWI_MASTER_STAT_DNAK |
                                             TWI_MASTER_STAT_ANAK |
                                             TWI_MASTER_STAT_LOSTARB;
  BFIN_REG16(base, TWI_FIFO_CTL_OFFSET) = TWI_FIFO_CTL_XMTFLUSH |
                                          TWI_FIFO_CTL_RCVFLUSH;
  BFIN_REG16(base, TWI_FIFO_CTL_OFFSET) = 0;
  BFIN_REG16(base, TWI_INT_STAT_OFFSET) = TWI_INT_STAT_RCVSERV |
                                          TWI_INT_STAT_XMTSERV |
                                          TWI_INT_STAT_MERR |
                                          TWI_INT_STAT_MCOMP |
                                          TWI_INT_STAT_SOVF |
                                          TWI_INT_STAT_SERR |
                                          TWI_INT_STAT_SCOMP |
                                          TWI_INT_STAT_SINIT;
  BFIN_REG16(base, TWI_INT_MASK_OFFSET) = TWI_INT_MASK_RCVSERVM |
                                          TWI_INT_MASK_XMTSERVM;

  return result;
}
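A minimal initialization sketch using only the configuration fields that
bfin_twi_init() reads above (base, sclk, fast, slave_address); the MMR base
address, clock rate, channel number, and slave address are hypothetical
placeholders, not values taken from this file:

static bfin_twi_config_t twiConfig = {
  .base = (void *) 0xffc01400, /* hypothetical TWI MMR base address */
  .sclk = 100000000,           /* hypothetical 100 MHz system clock */
  .fast = false,               /* standard mode, 100 kHz SCL */
  .slave_address = 0x20        /* hypothetical own slave address */
};

static void twiSetupExample(void) {
  rtems_status_code status;

  status = bfin_twi_init(0, &twiConfig); /* channel 0 assumed to exist */
  if (status != RTEMS_SUCCESSFUL)
    rtems_panic("twi init failed\n");
}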
Example 6
File: twi.c, Project: RTEMS/rtems
rtems_status_code bfin_twi_request(int channel, uint8_t address,
                                   bfin_twi_request_t *request,
                                   rtems_interval timeout) {
  rtems_status_code result;
  void *base;
  rtems_interrupt_level level;
  uint16_t r;
  uint16_t masterMode;

  if (channel < 0 || channel >= N_BFIN_TWI)
    return RTEMS_INVALID_NUMBER;
  result = rtems_semaphore_obtain(twi[channel].mutex,
                                  RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  if (result == RTEMS_SUCCESSFUL) {
    base = twi[channel].base;
    twi[channel].req = request;

    if (request->write) {
      twi[channel].dataPtr = request->data;
      twi[channel].count = request->count;
    } else
      twi[channel].count = 0;

    BFIN_REG16(base, TWI_MASTER_ADDR_OFFSET) = (uint16_t) address <<
                                               TWI_MASTER_ADDR_MADDR_SHIFT;
    masterMode = BFIN_REG16(base, TWI_MASTER_CTL_OFFSET);
    masterMode |= (request->count << TWI_MASTER_CTL_DCNT_SHIFT);
    if (request->next)
      masterMode |= TWI_MASTER_CTL_RSTART;
    if (!request->write)
      masterMode |= TWI_MASTER_CTL_MDIR;
    masterMode |= TWI_MASTER_CTL_MEN;
    rtems_interrupt_disable(level);
    if (!twi[channel].slaveActive) {
      r = BFIN_REG16(base, TWI_FIFO_CTL_OFFSET);
      BFIN_REG16(base, TWI_FIFO_CTL_OFFSET) = r | TWI_FIFO_CTL_XMTFLUSH;
      BFIN_REG16(base, TWI_FIFO_CTL_OFFSET) = r;
      if (request->write) {
        while (twi[channel].count &&
               (BFIN_REG16(base, TWI_FIFO_STAT_OFFSET) &
                TWI_FIFO_STAT_XMTSTAT_MASK) !=
               TWI_FIFO_STAT_XMTSTAT_FULL) {
          BFIN_REG16(base, TWI_XMT_DATA8_OFFSET) =
              (uint16_t) *twi[channel].dataPtr++;
          twi[channel].count--;
        }
      }
      twi[channel].masterActive = true;
      BFIN_REG16(base, TWI_MASTER_CTL_OFFSET) = masterMode;
    } else {
      twi[channel].masterActive = false;
      twi[channel].masterResult = -1; /* BISON (code should be equiv to lost arbitration) */
    }
    rtems_interrupt_enable(level);
    while (result == RTEMS_SUCCESSFUL && twi[channel].masterActive)
      result = rtems_semaphore_obtain(twi[channel].irqSem,
                                      RTEMS_WAIT, timeout);
    if (result == RTEMS_SUCCESSFUL)
      result = twi[channel].masterResult;
    else {
      /* BISON abort */
    }
    rtems_semaphore_release(twi[channel].mutex);
  }
  return result;
}
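A sketch of a single master-mode write built from the request fields that
bfin_twi_request() consumes above (write, data, count, next); the channel,
7-bit device address, timeout, and payload bytes are hypothetical:

static uint8_t payload[2] = { 0x01, 0x80 }; /* e.g. register index, value */
static bfin_twi_request_t writeReq = {
  .write = true,
  .data = payload,
  .count = 2,
  .next = NULL /* no repeated-start continuation */
};

static rtems_status_code twiWriteExample(void) {
  return bfin_twi_request(0, 0x48, &writeReq,
                          RTEMS_MILLISECONDS_TO_TICKS(100));
}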
static void initializeHardware(struct bfin_ethernetSoftc *sc) {
  struct ifnet *ifp;
  struct mbuf *m;
  unsigned char *hwaddr;
  int cacheAlignment;
  int rxStatusSize;
  int txStatusSize;
  char *ptr;
  int i;
  void *ethBase;
  void *rxdmaBase;
  void *txdmaBase;
  uint32_t divisor;

  ifp = &sc->arpcom.ac_if;
  ethBase = sc->ethBase;
  rxdmaBase = sc->rxdmaBase;
  txdmaBase = sc->txdmaBase;

  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_FLC_OFFSET) = 0;
  divisor = (sc->sclk / 25000000) / 2 - 1;
  BFIN_REG32(ethBase, EMAC_SYSCTL_OFFSET) = (divisor <<
                                             EMAC_SYSCTL_MDCDIV_SHIFT) |
                                            EMAC_SYSCTL_RXDWA;
#ifdef BFIN_IPCHECKSUMS
  BFIN_REG32(ethBase, EMAC_SYSCTL_OFFSET) |= EMAC_SYSCTL_RXCKS;
#endif
  BFIN_REG32(ethBase, EMAC_SYSTAT_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_RX_IRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_RX_STKY_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_TX_IRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_TX_STKY_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_RIRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_MMC_RIRQS_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_TIRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_MMC_TIRQS_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_CTL_OFFSET) = EMAC_MMC_CTL_MMCE |
                                             EMAC_MMC_CTL_CCOR |
                                             EMAC_MMC_CTL_RSTC;
  BFIN_REG32(ethBase, EMAC_MMC_CTL_OFFSET) = EMAC_MMC_CTL_MMCE |
                                             EMAC_MMC_CTL_CCOR;

  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_X_COUNT_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_X_COUNT_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_X_MODIFY_OFFSET) = 4;
  BFIN_REG16(txdmaBase, DMA_X_MODIFY_OFFSET) = 4;
  BFIN_REG16(rxdmaBase, DMA_Y_COUNT_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_Y_COUNT_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_Y_MODIFY_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_Y_MODIFY_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) = DMA_IRQ_STATUS_DMA_ERR |
                                                 DMA_IRQ_STATUS_DMA_DONE;

  /* The status structures must not share cache lines with anything else,
     including other status structures; otherwise the processor and the DMA
     controller could not both write to them safely.  So round each structure
     size up to a multiple of the cache line size. */
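  /* worked example (illustrative sizes, not the real structure sizes):
     with a 32-byte cache line and sizeof(rxStatusT) == 4, the rounding
     below yields rxStatusSize == 32, so consecutive status structures
     never share a cache line */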
  cacheAlignment = rtems_cache_get_data_line_size();
  if (cacheAlignment == 0)
     cacheAlignment = 1;
  rxStatusSize = cacheAlignment * ((sizeof(rxStatusT) + cacheAlignment - 1) /
                                   cacheAlignment);
  txStatusSize = cacheAlignment * ((sizeof(txStatusT) + cacheAlignment - 1) /
                                   cacheAlignment);
  /* Allocate enough extra to allow structures to start at cache aligned
     boundary. */
  sc->status = malloc(sc->rxDescCount * rxStatusSize +
                      sc->txDescCount * txStatusSize +
                      cacheAlignment - 1, M_DEVBUF, M_NOWAIT);
  sc->rx = malloc(sc->rxDescCount * sizeof(*sc->rx), M_DEVBUF, M_NOWAIT);
  sc->tx = malloc(sc->txDescCount * sizeof(*sc->tx), M_DEVBUF, M_NOWAIT);
  if (sc->status == NULL || sc->rx == NULL || sc->tx == NULL)
    rtems_panic("No memory!\n");

  /* Start status structures at cache aligned boundary. */
  ptr = (char *) (((intptr_t) sc->status + cacheAlignment - 1) &
                  ~(cacheAlignment - 1));
  memset(ptr, 0, sc->rxDescCount * rxStatusSize +
                 sc->txDescCount * txStatusSize);
  memset(sc->rx, 0, sc->rxDescCount * sizeof(*sc->rx));
  memset(sc->tx, 0, sc->txDescCount * sizeof(*sc->tx));
  rtems_cache_flush_multiple_data_lines(ptr, sc->rxDescCount * rxStatusSize +
                                             sc->txDescCount * txStatusSize);
  for (i = 0; i < sc->rxDescCount; i++) {
    MGETHDR(m, M_WAIT, MT_DATA);
    MCLGET(m, M_WAIT);
    m->m_pkthdr.rcvif = ifp;
    sc->rx[i].m = m;
    /* start dma at 32 bit boundary */
    sc->rx[i].data.addr = (void *) (((intptr_t) m->m_data + 3) & ~3);
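    /* the + 2 below covers the two alignment bytes the EMAC prepends
       to each frame when EMAC_SYSCTL_RXDWA is set */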
    rtems_cache_invalidate_multiple_data_lines(
        sc->rx[i].data.addr,
        BFIN_ETHERNET_MAX_FRAME_LENGTH + 2);
    sc->rx[i].data.dmaConfig = DMA_MODE_RX;
    sc->rx[i].data.next = &(sc->rx[i].status);
    sc->rx[i].status.addr = ptr;
    if (i < sc->rxDescCount - 1) {
      sc->rx[i].status.dmaConfig = DMA_MODE_STATUS;
      sc->rx[i].status.next = &(sc->rx[i + 1].data);
    } else {
      sc->rx[i].status.dmaConfig = DMA_MODE_STATUS_LAST;
      sc->rx[i].status.next = &(sc->rx[0].data);
    }
    ptr += rxStatusSize;
  }
  rtems_cache_flush_multiple_data_lines(sc->rx, sc->rxDescCount *
                                                sizeof(*sc->rx));
  for (i = 0; i < sc->txDescCount; i++) {
    sc->tx[i].data.addr = &sc->tx[i].buffer.packet;
    sc->tx[i].data.dmaConfig = DMA_MODE_TX;
    sc->tx[i].data.next = &(sc->tx[i].status);
    sc->tx[i].status.addr = ptr;
    sc->tx[i].status.dmaConfig = DMA_MODE_STATUS_LAST;
    if (i < sc->txDescCount - 1)
      sc->tx[i].status.next = &(sc->tx[i + 1].data);
    else
      sc->tx[i].status.next = &(sc->tx[0].data);
    sc->tx[i].inUse = false;
    ptr += txStatusSize;
  }
  rtems_cache_flush_multiple_data_lines(sc->tx, sc->txDescCount *
                                                sizeof(*sc->tx));

  BFIN_REG32(rxdmaBase, DMA_NEXT_DESC_PTR_OFFSET) = (uint32_t) &sc->rx[0].data;
  BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) = (uint32_t) &sc->tx[0].data;

  hwaddr = sc->arpcom.ac_enaddr;
  BFIN_REG16(ethBase, EMAC_ADDRHI_OFFSET) = ((uint16_t) hwaddr[5] << 8) |
                                            hwaddr[4];
  BFIN_REG32(ethBase, EMAC_ADDRLO_OFFSET) = ((uint32_t) hwaddr[3] << 24) |
                                            ((uint32_t) hwaddr[2] << 16) |
                                            ((uint32_t) hwaddr[1] << 8) |
                                            hwaddr[0];

  if (sc->acceptBroadcast)
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) &= ~EMAC_OPMODE_DBF;
  else
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_DBF;

}
static void rxDaemon(void *arg) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  struct mbuf *m;
  struct mbuf *rxPacket;
  void *dataPtr;
  rtems_event_set events;
  struct ether_header *eh;
  rxStatusT *status;
  uint32_t rxStatus;
  int head;
  int prevHead;
  int length;
  void *ethBase;
  void *rxdmaBase;

  sc = (struct bfin_ethernetSoftc *) arg;
  rxdmaBase = sc->rxdmaBase;
  ethBase = sc->ethBase;
  ifp = &sc->arpcom.ac_if;
  prevHead = sc->rxDescCount - 1;
  head = 0;

  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_RE;

  while (1) {
    status = sc->rx[head].status.addr;
    rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    while (status->status != 0) {
      if (status->status & EMAC_RX_STAT_RX_OK) {
        /* get new cluster to replace this one */
        MGETHDR(m, M_WAIT, MT_DATA);
        MCLGET(m, M_WAIT);
        m->m_pkthdr.rcvif = ifp;
      } else
        m = NULL;

      rxStatus = status->status;
      /* update statistics */

      if (m) {
        /* save received packet to send up a little later */
        rxPacket = sc->rx[head].m;
        dataPtr = sc->rx[head].data.addr;

        /* setup dma for new cluster */
        sc->rx[head].m = m;
        sc->rx[head].data.addr = (void *) (((intptr_t) m->m_data + 3) & ~3);
        /* invalidate cache for new data buffer, in case any lines
           are dirty from previous owner */
        rtems_cache_invalidate_multiple_data_lines(
            sc->rx[head].data.addr,
            BFIN_ETHERNET_MAX_FRAME_LENGTH + 2);
      } else
        rxPacket = NULL;

      sc->rx[head].status.dmaConfig = DMA_MODE_STATUS_LAST;
      rtems_cache_flush_multiple_data_lines(&sc->rx[head],
                                            sizeof(sc->rx[head]));

      /* mark descriptor as empty */
      status->status = 0;
      rtems_cache_flush_multiple_data_lines(&status->status,
                                            sizeof(status->status));

      /* allow dma to continue from previous descriptor into this
         one */
      sc->rx[prevHead].status.dmaConfig = DMA_MODE_STATUS;
      rtems_cache_flush_multiple_data_lines(
          &sc->rx[prevHead].status.dmaConfig,
          sizeof(sc->rx[prevHead].status.dmaConfig));

      if (rxPacket) {
        /* send it up */
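        /* skip the two RXDWA alignment bytes; the 14-byte Ethernet
           header that follows is consumed by ether_input() */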
        eh = (struct ether_header *) ((intptr_t) dataPtr + 2);
        rxPacket->m_data = (caddr_t) ((intptr_t) dataPtr + 2 + 14);
        length = (rxStatus & EMAC_RX_STAT_RX_FRLEN_MASK) >>
                  EMAC_RX_STAT_RX_FRLEN_SHIFT;
        rxPacket->m_len = length - 14;
        rxPacket->m_pkthdr.len = rxPacket->m_len;
        /* invalidate packet buffer cache again (even though it
           was invalidated prior to giving it to dma engine),
           because speculative reads might cause cache lines to
           be filled at any time */
        rtems_cache_invalidate_multiple_data_lines(eh, length);
        ether_input(ifp, eh, rxPacket);
      }

      if (++prevHead == sc->rxDescCount)
        prevHead = 0;
      if (++head == sc->rxDescCount)
        head = 0;
      status = sc->rx[head].status.addr;
      rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    }

    /* if dma stopped before the next descriptor, restart it */
    if ((BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) &
         DMA_IRQ_STATUS_DMA_RUN) == 0 &&
        BFIN_REG32(rxdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
        (uint32_t) &sc->rx[head].data) {
      BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
    }

    rtems_bsdnet_event_receive(INTERRUPT_EVENT, RTEMS_WAIT | RTEMS_EVENT_ANY,
                               RTEMS_NO_TIMEOUT, &events);
  }

}
static void txDaemon(void *arg) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  struct mbuf *m, *first;
  rtems_event_set events;
  void *ethBase;
  void *txdmaBase;
  txStatusT *status;
  int head;
  int prevHead;
  int tail;
  int length;
  char *ptr;

  sc = (struct bfin_ethernetSoftc *) arg;
  ifp = &sc->arpcom.ac_if;

  ethBase = sc->ethBase;
  txdmaBase = sc->txdmaBase;
  head = 0;
  prevHead = sc->txDescCount - 1;
  tail = 0;

  while (1) {
    /* wait for packet or isr */
    rtems_bsdnet_event_receive(START_TRANSMIT_EVENT | INTERRUPT_EVENT,
                               RTEMS_EVENT_ANY | RTEMS_WAIT,
                               RTEMS_NO_TIMEOUT, &events);

    /* if no descriptors are available, try to free one.  To reduce
       transmit latency, only free one here. */
    if (sc->tx[head].inUse && txFree(sc, tail)) {
      if (++tail == sc->txDescCount)
        tail = 0;
    }
    /* send packets until the queue is empty or we run out of tx
       descriptors */
    while (!sc->tx[head].inUse && (ifp->if_flags & IFF_OACTIVE)) {
      /* get the next mbuf chain to transmit */
      IF_DEQUEUE(&ifp->if_snd, m);
      if (m != NULL) {
        /* copy packet into our buffer */
        ptr = sc->tx[head].buffer.packet.data;
        length = 0;
        first = m;
        while (m && length <= BFIN_ETHERNET_MAX_FRAME_LENGTH) {
          length += m->m_len;
          if (length <= BFIN_ETHERNET_MAX_FRAME_LENGTH)
            memcpy(ptr, m->m_data, m->m_len);
          ptr += m->m_len;
          m = m->m_next;
        }
        m_freem(first); /* all done with mbuf */
        if (length <= BFIN_ETHERNET_MAX_FRAME_LENGTH) {
          sc->tx[head].buffer.packet.length = length;

          /* setup tx dma */
          status = (txStatusT *) sc->tx[head].status.addr;
          status->status = 0;
          sc->tx[head].inUse = true;
          rtems_cache_flush_multiple_data_lines(status, sizeof(*status));

          /* configure dma to stop after sending this packet */
          sc->tx[head].status.dmaConfig = DMA_MODE_STATUS_LAST;
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[head].status.dmaConfig,
              sizeof(sc->tx[head].status.dmaConfig));
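          /* flush the frame data together with the 16-bit length word
             that precedes it in the transmit buffer */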
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[head].buffer.packet,
              length + sizeof(uint16_t));

          /* modify previous descriptor to let it continue
             automatically */
          sc->tx[prevHead].status.dmaConfig = DMA_MODE_STATUS;
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[prevHead].status.dmaConfig,
              sizeof(sc->tx[prevHead].status.dmaConfig));

          /* restart dma if it stopped before the packet we just
             added.  This is purely to reduce transmit latency, as it
             would be restarted anyway after this loop (and needs to
             be: there is a very small chance that the dma controller
             had started the last status transfer before the new
             dmaConfig word was written above and is still doing that
             status transfer when we check the status below).  That
             case is caught by the check outside the loop, which is
             guaranteed to run at least once after the last dma
             complete interrupt. */
          if ((BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) &
               DMA_IRQ_STATUS_DMA_RUN) == 0 &&
               BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
               (uint32_t) sc->tx[head].data.next) {
            BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_TX;
            BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_TE;
          }

          if (++head == sc->txDescCount)
            head = 0;
          if (++prevHead == sc->txDescCount)
            prevHead = 0;

          /* if no descriptors are available, try to free one */
          if (sc->tx[head].inUse && txFree(sc, tail)) {
            if (++tail == sc->txDescCount)
              tail = 0;
          }
        } else {
          /* dropping packet: too large */
        }
      } else {
        /* no packets queued */
        ifp->if_flags &= ~IFF_OACTIVE;
      }
    }

    /* if dma stopped and there's more to do, restart it */
    if ((BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) &
         DMA_IRQ_STATUS_DMA_RUN) == 0 &&
        BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
        (uint32_t) &sc->tx[head].data) {
      BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_TX;
      BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_TE;
    }

    /* free up any additional tx descriptors */
    while (txFree(sc, tail)) {
      if (++tail == sc->txDescCount)
        tail = 0;
    }
  }
}