Example #1
File: vc.c Project: AoLaD/rtems
/*
 * When cache is enabled then content of buffer exchanged
 * with VideoCore has to be propagated through ARM11/Cortex-A7
 * caches
 */
static inline void bcm2835_mailbox_buffer_flush_and_invalidate(
  void  *buf,
  size_t size
)
{
  rtems_cache_flush_multiple_data_lines( buf, size );
  rtems_cache_invalidate_multiple_data_lines( buf, size );
}
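The comment above spells out the general rule for a buffer shared with a bus master: write the CPU's dirty lines back before handing the buffer to the device, and invalidate the lines before reading what the device wrote back. A condensed sketch of that ordering, assuming a hypothetical device_exchange() that stands in for the actual VideoCore mailbox transfer:

#include <stddef.h>
#include <rtems.h>

/* Hypothetical transfer routine; it overwrites the buffer with the reply. */
extern void device_exchange(void *buf, size_t size);

static void shared_buffer_roundtrip(void *buf, size_t size)
{
  /* Write dirty lines back so the device sees the current contents. */
  rtems_cache_flush_multiple_data_lines(buf, size);

  /* The device reads the request and writes its reply into the buffer. */
  device_exchange(buf, size);

  /* Discard stale cached lines before the CPU reads the reply. */
  rtems_cache_invalidate_multiple_data_lines(buf, size);
}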
int
m8xx_uart_pollRead(
    int minor
)
{
    unsigned char c;

    if (RxBd[minor]->status & M8xx_BD_EMPTY) {
        return -1;
    }
    rtems_cache_invalidate_multiple_data_lines(
        (const void *) RxBd[minor]->buffer,
        RxBd[minor]->length
    );
    c = ((char *)RxBd[minor]->buffer)[0];
    RxBd[minor]->status = M8xx_BD_EMPTY | M8xx_BD_WRAP;
    return c;
}
static bool txFree(struct bfin_ethernetSoftc *sc, int index) {
  bool       freed;
  txStatusT *status;

  freed = false;
  if (sc->tx[index].inUse) {
    status = (txStatusT *) sc->tx[index].status.addr;
    rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    if (status->status != 0) {
      /* update statistics */

      sc->tx[index].inUse = false;
      freed = true;
    }
  }

  return freed;
}
Example #4
/*
 * polled scc read function
 */
static int
sccPollRead (int minor)
{
  int c = -1;
  int chan = minor;

  while(1) {
    if ((sccCurrRxBd[chan]->status & M8xx_BD_EMPTY) != 0) {
      return -1;
    }

    if (0 == (sccCurrRxBd[chan]->status & (M8xx_BD_OVERRUN
					   | M8xx_BD_PARITY_ERROR
					   | M8xx_BD_FRAMING_ERROR
					   | M8xx_BD_BREAK
					   | M8xx_BD_IDLE))) {
      /* character received and no error detected */
      rtems_cache_invalidate_multiple_data_lines((void *)sccCurrRxBd[chan]->buffer,
						 sccCurrRxBd[chan]->length);
      c = (unsigned)*((char *)sccCurrRxBd[chan]->buffer);
      /*
       * clear status
       */
    }
    sccCurrRxBd[chan]->status =
      (sccCurrRxBd[chan]->status
       & (M8xx_BD_WRAP | M8xx_BD_INTERRUPT))
      | M8xx_BD_EMPTY;
    /*
     * advance to next BD
     */
    if ((sccCurrRxBd[chan]->status & M8xx_BD_WRAP) != 0) {
      sccCurrRxBd[chan] = sccFrstRxBd[chan];
    }
    else {
      sccCurrRxBd[chan]++;
    }
    if (c >= 0) {
      return c;
    }
  }
}
static void
m8xx_smc2_interrupt_handler (void *unused)
{
    int nb_overflow;

    /*
     * Buffer received?
     */
    if (m8xx.smc2.smce & M8xx_SMCE_RX) {
        m8xx.smc2.smce = M8xx_SMCE_RX;  /* Clear the event */


        /* Check that the buffer is ours */
        if ((RxBd[SMC2_MINOR]->status & M8xx_BD_EMPTY) == 0) {
            rtems_cache_invalidate_multiple_data_lines(
                (const void *) RxBd[SMC2_MINOR]->buffer,
                RxBd[SMC2_MINOR]->length );
            nb_overflow = rtems_termios_enqueue_raw_characters(
                              (void *)ttyp[SMC2_MINOR],
                              (char *)RxBd[SMC2_MINOR]->buffer,
                              (int)RxBd[SMC2_MINOR]->length );
            RxBd[SMC2_MINOR]->status = M8xx_BD_EMPTY | M8xx_BD_WRAP |
                                       M8xx_BD_INTERRUPT;
        }
    }

    /*
     * Buffer transmitted?
     */
    if (m8xx.smc2.smce & M8xx_SMCE_TX) {
        m8xx.smc2.smce = M8xx_SMCE_TX;    /* Clear the event */

        /* Check that the buffer is ours */
        if ((TxBd[SMC2_MINOR]->status & M8xx_BD_READY) == 0)
            rtems_termios_dequeue_characters (
                (void *)ttyp[SMC2_MINOR],
                (int)TxBd[SMC2_MINOR]->length);
    }
}
Example #6
static void dwmac_desc_enh_init_rx_desc(
  dwmac_common_context *self,
  const unsigned int    index )
{
  volatile dwmac_desc_ext *p_enh       =
    (volatile dwmac_desc_ext *) self->dma_rx;
  const size_t             NUM_DESCS   = (size_t) self->bsd_config->rbuf_count;
  char                    *clust_start =
    mtod( self->mbuf_addr_rx[index], char * );


  assert( NULL != p_enh );

  DWMAC_COMMON_DSB();

  rtems_cache_invalidate_multiple_data_lines(
    clust_start,
    DWMAC_DESC_COM_BUF_SIZE + ETHER_ALIGN
    );

  if ( self->mbuf_addr_rx[index] != NULL ) {
    p_enh[index].erx.des0_3.des1 = DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_1_SIZE_SET(
      p_enh->erx.des0_3.des1,
      DWMAC_DESC_COM_BUF_SIZE );
  } else {
    p_enh[index].erx.des0_3.des1 = DWMAC_DESC_ERX_DES1_RECEIVE_BUFF_1_SIZE_SET(
      p_enh->erx.des0_3.des1,
      0 );
  }

  p_enh[index].erx.des0_3.des2 = (uint32_t) clust_start;

  /* The network controller supports adding a second data buffer to
   * p_enh->erx.des0_3.des3. For simplicity reasons we will not do this */
  dwmac_desc_enh_rx_set_on_ring_chain( &p_enh[index],
                                       ( index == NUM_DESCS - 1 ) );
  DWMAC_COMMON_DSB();
  p_enh[index].erx.des0_3.des0 = DWMAC_DESC_ERX_DES0_OWN_BIT;
}
Example #7
static void smp_cache_data_inv(void *arg)
{
  smp_cache_area *area = arg;

  rtems_cache_invalidate_multiple_data_lines(area->addr, area->size);
}
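The smp_cache_area type used by this handler is not part of the snippet; a minimal definition consistent with the usage above (an address/size pair passed to the per-processor handler) would be:

#include <stddef.h>

typedef struct {
  const void *addr;
  size_t      size;
} smp_cache_area;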
Example #8
/**
 * @brief Writes @a n characters from @a out to bus @a bus and synchronously stores the received data in @a in.
 *
 * eDMA channel usage for transmission:
 * @dot
 * digraph push {
 *  push [label="Push Register"];
 *  push_data [label="Push Data"];
 *  idle_push_data [label="Idle Push Data"];
 *  out [shape=box,label="Output Buffer"];
 *  edge [color=red,fontcolor=red];
 *  push -> idle_push_data [label="Transmit Request",URL="\ref mpc55xx_dspi_bus_entry::edma_transmit"];
 *  push -> out [label="Transmit Request",URL="\ref mpc55xx_dspi_bus_entry::edma_transmit"];
 *  out -> push_data [label="Channel Link",URL="\ref mpc55xx_dspi_bus_entry::edma_push"];
 *  edge [color=blue,fontcolor=blue];
 *  out -> push_data [label="Data"];
 *  push_data -> push [label="Data"];
 *  idle_push_data -> push [label="Data"];
 * }
 * @enddot
 *
 * eDMA channel usage for receiving:
 * @dot
 * digraph pop {
 *  pop [label="Pop Register"];
 *  nirvana [label="Nirvana"];
 *  in [shape=box,label="Input Buffer"];
 *  edge [color=red,fontcolor=red];
 *  pop -> nirvana [label="Receive Request",URL="\ref mpc55xx_dspi_bus_entry::edma_receive"];
 *  pop -> in [label="Receive Request",URL="\ref mpc55xx_dspi_bus_entry::edma_receive"];
 *  edge [color=blue,fontcolor=blue];
 *  pop -> nirvana [label="Data"];
 *  pop -> in [label="Data"];
 * }
 * @enddot
 */
static int mpc55xx_dspi_read_write( rtems_libi2c_bus_t *bus, unsigned char *in, const unsigned char *out, int n)
{
	mpc55xx_dspi_bus_entry *e = (mpc55xx_dspi_bus_entry *) bus;

	/* Non cache aligned characters */
	int n_nc = n;

	/* Cache aligned characters */
	int n_c = 0;

	/* Register addresses */
	volatile void *push = &e->regs->PUSHR.R;
	volatile void *pop = &e->regs->POPR.R;
	volatile union DSPI_SR_tag *status = &e->regs->SR;

	/* Push and pop data */
	union DSPI_PUSHR_tag push_data = e->push_data;
	union DSPI_POPR_tag pop_data;

	/* Status register */
	union DSPI_SR_tag sr;

	/* Read and write indices */
	int r = 0;
	int w = 0;

	if (n == 0) {
		return 0;
	} else if (in == NULL && out == NULL) {
		return -RTEMS_INVALID_ADDRESS;
	}

	if (n > MPC55XX_DSPI_EDMA_MAGIC_SIZE) {
		n_nc = (int) mpc55xx_non_cache_aligned_size( in);
		n_c = (int) mpc55xx_cache_aligned_size( in, (size_t) n);
		if (n_c > EDMA_TCD_BITER_LINKED_SIZE) {
			RTEMS_SYSLOG_WARNING( "buffer size out of range, cannot use eDMA\n");
			n_nc = n;
			n_c = 0;
		} else if (n_nc + n_c != n) {
			RTEMS_SYSLOG_WARNING( "input buffer not proper cache aligned, cannot use eDMA\n");
			n_nc = n;
			n_c = 0;
		}
	}

#ifdef DEBUG
	if (e->regs->SR.B.TXCTR != e->regs->SR.B.RXCTR) {
		RTEMS_SYSLOG_WARNING( "FIFO counter not equal\n");
	}
#endif /* DEBUG */

	/* Direct IO */
	if (out == NULL) {
		push_data.B.TXDATA = e->idle_char;
		while (r < n_nc || w < n_nc) {
			/* Wait for available FIFO */
			do {
				sr.R = status->R;
			} while (sr.B.TXCTR == MPC55XX_DSPI_FIFO_SIZE && sr.B.RXCTR == 0);

			/* Write */
			if (w < n_nc && (w - r) < MPC55XX_DSPI_FIFO_SIZE && sr.B.TXCTR != MPC55XX_DSPI_FIFO_SIZE) {
				++w;
				ppc_write_word( push_data.R, push);
			}

			/* Read */
			if (r < n_nc && sr.B.RXCTR != 0) {
				pop_data.R = ppc_read_word( pop);
				in [r] = (unsigned char) pop_data.B.RXDATA;
				++r;
			}
		}
	} else if (in == NULL) {
		while (r < n_nc || w < n_nc) {
			/* Wait for available FIFO */
			do {
				sr.R = status->R;
			} while (sr.B.TXCTR == MPC55XX_DSPI_FIFO_SIZE && sr.B.RXCTR == 0);

			/* Write */
			if (w < n_nc && (w - r) < MPC55XX_DSPI_FIFO_SIZE && sr.B.TXCTR != MPC55XX_DSPI_FIFO_SIZE) {
				push_data.B.TXDATA = out [w];
				++w;
				ppc_write_word( push_data.R, push);
			}

			/* Read */
			if (r < n_nc && sr.B.RXCTR != 0) {
				pop_data.R = ppc_read_word( pop);
				++r;
			}
		}
	} else {
		while (r < n_nc || w < n_nc) {
			/* Wait for available FIFO */
			do {
				sr.R = status->R;
			} while (sr.B.TXCTR == MPC55XX_DSPI_FIFO_SIZE && sr.B.RXCTR == 0);

			/* Write */
			if (w < n_nc && (w - r) < MPC55XX_DSPI_FIFO_SIZE && sr.B.TXCTR != MPC55XX_DSPI_FIFO_SIZE) {
				push_data.B.TXDATA = out [w];
				++w;
				ppc_write_word( push_data.R, push);
			}

			/* Read */
			if (r < n_nc && sr.B.RXCTR != 0) {
				pop_data.R = ppc_read_word( pop);
				in [r] = (unsigned char) pop_data.B.RXDATA;
				++r;
			}
		}
	}

	/* eDMA transfers */
	if (n_c > 0) {
		rtems_status_code sc = RTEMS_SUCCESSFUL;
		unsigned char *in_c = in + n_nc;
		const unsigned char *out_c = out + n_nc;
		struct tcd_t tcd_transmit = EDMA_TCD_DEFAULT;
		struct tcd_t tcd_receive = EDMA_TCD_DEFAULT;

		/* Cache operations */
		rtems_cache_flush_multiple_data_lines( out_c, (size_t) n_c);
		rtems_cache_invalidate_multiple_data_lines( in_c, (size_t) n_c);

		/* Set transmit TCD */
		if (out == NULL) {
			e->push_data.B.TXDATA = e->idle_char;
			mpc55xx_dspi_store_push_data( e);
			tcd_transmit.SADDR = mpc55xx_dspi_push_data_address( e);
			tcd_transmit.SDF.B.SSIZE = 2;
			tcd_transmit.SDF.B.SOFF = 0;
			tcd_transmit.DADDR = (uint32_t) push;
			tcd_transmit.SDF.B.DSIZE = 2;
			tcd_transmit.CDF.B.DOFF = 0;
			tcd_transmit.NBYTES = 4;
			tcd_transmit.CDF.B.CITER = n_c;
			tcd_transmit.BMF.B.BITER = n_c;
		} else {
			unsigned push_channel = mpc55xx_edma_channel_by_tcd( e->edma_push.edma.edma_tcd);
			mpc55xx_edma_clear_done( e->edma_transmit.edma.edma_tcd);
			tcd_transmit.SADDR = (uint32_t) out_c;
			tcd_transmit.SDF.B.SSIZE = 0;
			tcd_transmit.SDF.B.SOFF = 1;
			tcd_transmit.DADDR = mpc55xx_dspi_push_data_address( e) + 3;
			tcd_transmit.SDF.B.DSIZE = 0;
			tcd_transmit.CDF.B.DOFF = 0;
			tcd_transmit.NBYTES = 1;
			tcd_transmit.CDF.B.CITERE_LINK = 1;
			tcd_transmit.BMF.B.BITERE_LINK = 1;
			tcd_transmit.BMF.B.MAJORLINKCH = push_channel;
			tcd_transmit.CDF.B.CITER = EDMA_TCD_LINK_AND_BITER( push_channel, n_c);
			tcd_transmit.BMF.B.BITER = EDMA_TCD_LINK_AND_BITER( push_channel, n_c);
			tcd_transmit.BMF.B.MAJORE_LINK = 1;
		}
		tcd_transmit.BMF.B.D_REQ = 1;
		tcd_transmit.BMF.B.INT_MAJ = 1;
		*e->edma_transmit.edma.edma_tcd = tcd_transmit;

		/* Set receive TCD */
		if (in == NULL) {
			tcd_receive.CDF.B.DOFF = 0;
			tcd_receive.DADDR = mpc55xx_dspi_nirvana_address( e);
		} else {
			tcd_receive.CDF.B.DOFF = 1;
			tcd_receive.DADDR = (uint32_t) in_c;
		}
		tcd_receive.SADDR = (uint32_t) pop + 3;
		tcd_receive.SDF.B.SSIZE = 0;
		tcd_receive.SDF.B.SOFF = 0;
		tcd_receive.SDF.B.DSIZE = 0;
		tcd_receive.NBYTES = 1;
		tcd_receive.BMF.B.D_REQ = 1;
		tcd_receive.BMF.B.INT_MAJ = 1;
		tcd_receive.CDF.B.CITER = n_c;
		tcd_receive.BMF.B.BITER = n_c;
		*e->edma_receive.edma.edma_tcd = tcd_receive;

		/* Clear request flags */
		sr.R = 0;
		sr.B.TFFF = 1;
		sr.B.RFDF = 1;
		status->R = sr.R;

		/* Enable hardware requests */
		mpc55xx_edma_enable_hardware_requests( e->edma_receive.edma.edma_tcd);
		mpc55xx_edma_enable_hardware_requests( e->edma_transmit.edma.edma_tcd);

		/* Wait for transmit update */
		sc = rtems_semaphore_obtain( e->edma_transmit.id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
		RTEMS_CHECK_SC_RV( sc, "transmit update");

		/* Wait for receive update */
		sc = rtems_semaphore_obtain( e->edma_receive.id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
		RTEMS_CHECK_SC_RV( sc, "receive update");
	}

	return n;
}
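This routine splits the transfer into a non-cache-aligned head handled by programmed I/O (n_nc) and a cache-aligned body handled by eDMA (n_c). The helpers mpc55xx_non_cache_aligned_size() and mpc55xx_cache_aligned_size() are not shown; the following is only an illustration of how such a split can be derived from the data cache line size, not the actual MPC55xx implementation:

#include <stddef.h>
#include <stdint.h>
#include <rtems.h>

/* Bytes from addr up to the next cache line boundary (the unaligned head). */
static size_t non_cache_aligned_size(const void *addr)
{
  size_t line = rtems_cache_get_data_line_size();
  uintptr_t a = (uintptr_t) addr;

  if (line == 0) {
    return 0; /* no data cache, treat everything as aligned */
  }

  return (size_t) ((line - (a % line)) % line);
}

/* Bytes forming whole, line-aligned chunks after the unaligned head. */
static size_t cache_aligned_size(const void *addr, size_t n)
{
  size_t line = rtems_cache_get_data_line_size();
  size_t head = non_cache_aligned_size(addr);

  if (line == 0 || n <= head) {
    return 0;
  }

  return ((n - head) / line) * line;
}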
Example #9
/*
 * Interrupt handler
 */
static rtems_isr
sccInterruptHandler (void *arg)
{
  int chan = (int)arg;

  /*
   * Buffer received?
   */
  if (CHN_EVENT_GET(chan) & 0x1) {
    /*
     * clear SCC event flag
     */
    CHN_EVENT_CLR(chan,0x01);
    /*
     * process event
     */
    while ((sccCurrRxBd[chan]->status & M8xx_BD_EMPTY) == 0) {
      if (sccttyp[chan] != NULL) {
	rtems_cache_invalidate_multiple_data_lines((void *)sccCurrRxBd[chan]->buffer,
						   sccCurrRxBd[chan]->length);
	rtems_termios_enqueue_raw_characters (sccttyp[chan],
					      (char *)sccCurrRxBd[chan]->buffer,
					      sccCurrRxBd[chan]->length);
      }
      /*
       * clear status
       */
      sccCurrRxBd[chan]->status =
	(sccCurrRxBd[chan]->status
	 & (M8xx_BD_WRAP | M8xx_BD_INTERRUPT))
	| M8xx_BD_EMPTY;
      /*
       * advance to next BD
       */
      if ((sccCurrRxBd[chan]->status & M8xx_BD_WRAP) != 0) {
	sccCurrRxBd[chan] = sccFrstRxBd[chan];
      }
      else {
	sccCurrRxBd[chan]++;
      }
    }
  }
  /*
   * Buffer transmitted?
   */
  if (CHN_EVENT_GET(chan) & 0x2) {
    /*
     * then clear interrupt event bit
     */
    CHN_EVENT_CLR(chan,0x2);
    /*
     * and signal successful transmit to termios
     */
    /*
     * FIXME: multiple dequeue calls for multiple buffers
     */
    while((sccDequTxBd[chan] != sccPrepTxBd[chan]) &&
	  ((sccDequTxBd[chan]->status & M8xx_BD_READY) == 0)) {
      if (sccttyp[chan] != NULL) {
	rtems_termios_dequeue_characters (sccttyp[chan],
					  sccDequTxBd[chan]->length);
      }
      /*
       * advance to next BD
       */
      if ((sccDequTxBd[chan]->status & M8xx_BD_WRAP) != 0) {
	sccDequTxBd[chan] = sccFrstTxBd[chan];
      }
      else {
	sccDequTxBd[chan]++;
      }
    }
  }
}
Example #10
/*
 * reader task
 */
static void
scc_rxDaemon (void *arg)
{
  struct m8260_hdlc_struct *sc = (struct m8260_hdlc_struct *)arg;
  struct ifnet *ifp = &sc->ac_if;
  struct mbuf *m;
  uint16_t         status;
  m8260BufferDescriptor_t *rxBd;
  int rxBdIndex;

  /*
   * Allocate space for incoming packets and start reception
   */
  for (rxBdIndex = 0 ; ;) {
    rxBd = sc->rxBdBase + rxBdIndex;
    MGETHDR (m, M_WAIT, MT_DATA);
    MCLGET (m, M_WAIT);
    m->m_pkthdr.rcvif = ifp;
    sc->rxMbuf[rxBdIndex] = m;
    rxBd->buffer = mtod (m, void *);
    rxBd->status = M8260_BD_EMPTY | M8260_BD_INTERRUPT;
    if (++rxBdIndex == sc->rxBdCount) {
      rxBd->status |= M8260_BD_WRAP;
      break;
    }
  }

/*
  m8260.scc3.sccm |= M8260_SCCE_RXF;
*/

  /*
   * Input packet handling loop
   */
  rxBdIndex = 0;
  for (;;) {
    rxBd = sc->rxBdBase + rxBdIndex;

    /*
     * Wait for packet if there's not one ready
     */
    if ((status = rxBd->status) & M8260_BD_EMPTY) {
      /*
       * Clear old events
       */

      m8260.scc3.scce = M8260_SCCE_RXF;

      /*
       * Wait for packet
       * Note that the buffer descriptor is checked
       * *before* the event wait -- this catches the
       * possibility that a packet arrived between the
       * `if' above, and the clearing of the event register.
       */
      while ((status = rxBd->status) & M8260_BD_EMPTY) {
        rtems_event_set events;

        /*
         * Unmask RXF (Full frame received) event
         */
        m8260.scc3.sccm |= M8260_SCCE_RXF;

/*        printk( "Rxdwait "); */

        rtems_bsdnet_event_receive (INTERRUPT_EVENT,
                                    RTEMS_WAIT|RTEMS_EVENT_ANY,
                                    RTEMS_NO_TIMEOUT,
                                    &events);

/*        printk( "Rxd " ); */
      }
    }

    /*
     * Check that packet is valid
     */
    if ((status & (M8260_BD_LAST |
                   M8260_BD_FIRST_IN_FRAME |
    		   M8260_BD_LONG |
                   M8260_BD_NONALIGNED |
                   M8260_BD_ABORT |
                   M8260_BD_CRC_ERROR |
                   M8260_BD_OVERRUN /*|
                   M8260_BD_CARRIER_LOST*/)) ==
                   (M8260_BD_LAST |
                   M8260_BD_FIRST_IN_FRAME ) ) {

/*      printk( "RxV " ); */

/*
 * Invalidate the buffer for this descriptor
 */

      rtems_cache_invalidate_multiple_data_lines((void *)rxBd->buffer, rxBd->length);

      m = sc->rxMbuf[rxBdIndex];

      /* strip off HDLC CRC */
      m->m_len = m->m_pkthdr.len = rxBd->length - sizeof(uint16_t);

      hdlc_input( ifp, m );

      /*
       * Allocate a new mbuf
       */
      MGETHDR (m, M_WAIT, MT_DATA);
      MCLGET (m, M_WAIT);
      m->m_pkthdr.rcvif = ifp;
      sc->rxMbuf[rxBdIndex] = m;
      rxBd->buffer = mtod (m, void *);
    }
    else {
Example #11
/*
 * reader task
 */
static void
scc_rxDaemon (void *arg)
{
  struct m8xx_enet_struct *sc = (struct m8xx_enet_struct *)arg;
  struct ifnet *ifp = &sc->arpcom.ac_if;
  struct mbuf *m;
  uint16_t   status;
  m8xxBufferDescriptor_t *rxBd;
  int rxBdIndex;

  /*
   * Allocate space for incoming packets and start reception
   */
  for (rxBdIndex = 0 ; ;) {
    rxBd = sc->rxBdBase + rxBdIndex;
    MGETHDR (m, M_WAIT, MT_DATA);
    MCLGET (m, M_WAIT);
    m->m_pkthdr.rcvif = ifp;
    sc->rxMbuf[rxBdIndex] = m;
    rxBd->buffer = mtod (m, void *);
    rxBd->status = M8xx_BD_EMPTY | M8xx_BD_INTERRUPT;
    if (++rxBdIndex == sc->rxBdCount) {
      rxBd->status |= M8xx_BD_WRAP;
      break;
    }
  }

  /*
   * Input packet handling loop
   */
  rxBdIndex = 0;
  for (;;) {
    rxBd = sc->rxBdBase + rxBdIndex;

    /*
     * Wait for packet if there's not one ready
     */
    if ((status = rxBd->status) & M8xx_BD_EMPTY) {
      /*
       * Clear old events
       */
      m8xx.scc1.scce = 0x8;

      /*
       * Wait for packet
       * Note that the buffer descriptor is checked
       * *before* the event wait -- this catches the
       * possibility that a packet arrived between the
       * `if' above, and the clearing of the event register.
       */
      while ((status = rxBd->status) & M8xx_BD_EMPTY) {
        rtems_event_set events;

        /*
         * Unmask RXF (Full frame received) event
         */
        m8xx.scc1.sccm |= 0x8;

        rtems_bsdnet_event_receive (INTERRUPT_EVENT,
                                    RTEMS_WAIT|RTEMS_EVENT_ANY,
                                    RTEMS_NO_TIMEOUT,
                                    &events);
      }
    }

    /*
     * Check that packet is valid
     */
    if ((status & (M8xx_BD_LAST |
                   M8xx_BD_FIRST_IN_FRAME |
                   M8xx_BD_LONG |
                   M8xx_BD_NONALIGNED |
                   M8xx_BD_SHORT |
                   M8xx_BD_CRC_ERROR |
                   M8xx_BD_OVERRUN |
                   M8xx_BD_COLLISION)) ==
        (M8xx_BD_LAST |
         M8xx_BD_FIRST_IN_FRAME)) {
      /*
       * Pass the packet up the chain.
       * FIXME: Packet filtering hook could be done here.
       */
      struct ether_header *eh;

      /*
       * Invalidate the buffer for this descriptor
       */
      rtems_cache_invalidate_multiple_data_lines((const void *)rxBd->buffer, rxBd->length);

      m = sc->rxMbuf[rxBdIndex];
      m->m_len = m->m_pkthdr.len = rxBd->length -
        sizeof(uint32_t) -
        sizeof(struct ether_header);
      eh = mtod (m, struct ether_header *);
      m->m_data += sizeof(struct ether_header);
      ether_input (ifp, eh, m);

      /*
       * Allocate a new mbuf
       */
      MGETHDR (m, M_WAIT, MT_DATA);
      MCLGET (m, M_WAIT);
      m->m_pkthdr.rcvif = ifp;
      sc->rxMbuf[rxBdIndex] = m;
      rxBd->buffer = mtod (m, void *);
    }
    else {
      /*
       * Something went wrong with the reception
       */
      if (!(status & M8xx_BD_LAST))
        sc->rxNotLast++;
      if (!(status & M8xx_BD_FIRST_IN_FRAME))
        sc->rxNotFirst++;
      if (status & M8xx_BD_LONG)
        sc->rxGiant++;
      if (status & M8xx_BD_NONALIGNED)
        sc->rxNonOctet++;
      if (status & M8xx_BD_SHORT)
        sc->rxRunt++;
      if (status & M8xx_BD_CRC_ERROR)
        sc->rxBadCRC++;
      if (status & M8xx_BD_OVERRUN)
        sc->rxOverrun++;
      if (status & M8xx_BD_COLLISION)
        sc->rxCollision++;
    }

    /*
     * Reenable the buffer descriptor
     */
    rxBd->status = (status & (M8xx_BD_WRAP | M8xx_BD_INTERRUPT)) |
      M8xx_BD_EMPTY;

    /*
     * Move to next buffer descriptor
     */
    if (++rxBdIndex == sc->rxBdCount)
      rxBdIndex = 0;
  }
}
Example #12
static void
fec_rxDaemon (void *arg)
{
    volatile struct mcf5282_enet_struct *sc = (volatile struct mcf5282_enet_struct *)arg;
    struct ifnet *ifp = (struct ifnet* )&sc->arpcom.ac_if;
    struct mbuf *m;
    volatile uint16_t status;
    volatile mcf5282BufferDescriptor_t *rxBd;
    int rxBdIndex;

    /*
     * Allocate space for incoming packets and start reception
     */
    for (rxBdIndex = 0 ; ;) {
        rxBd = sc->rxBdBase + rxBdIndex;
        MGETHDR(m, M_WAIT, MT_DATA);
        MCLGET(m, M_WAIT);
        m->m_pkthdr.rcvif = ifp;
        sc->rxMbuf[rxBdIndex] = m;
        rxBd->buffer = mtod(m, void *);
        rxBd->status = MCF5282_FEC_RxBD_E;
        if (++rxBdIndex == sc->rxBdCount) {
            rxBd->status |= MCF5282_FEC_RxBD_W;
            break;
        }
    }

    /*
     * Input packet handling loop
     */
    /* Indicate we have some ready buffers available */
    MCF5282_FEC_RDAR = 0;

    rxBdIndex = 0;
    for (;;) {
        rxBd = sc->rxBdBase + rxBdIndex;

        /*
         * Wait for packet if there's not one ready
         */
        if ((status = rxBd->status) & MCF5282_FEC_RxBD_E) {
            /*
             * Clear old events.
             */
            MCF5282_FEC_EIR = MCF5282_FEC_EIR_RXF;

            /*
             * Wait for packet to arrive.
             * Check the buffer descriptor before waiting for the event.
             * This catches the case when a packet arrives between the
             * `if' above, and the clearing of the RXF bit in the EIR.
             */
            while ((status = rxBd->status) & MCF5282_FEC_RxBD_E) {
                rtems_event_set events;
                int level;

                rtems_interrupt_disable(level);
                MCF5282_FEC_EIMR |= MCF5282_FEC_EIMR_RXF;
                rtems_interrupt_enable(level);
                rtems_bsdnet_event_receive (RX_INTERRUPT_EVENT,
                                            RTEMS_WAIT|RTEMS_EVENT_ANY,
                                            RTEMS_NO_TIMEOUT,
                                            &events);
            }
        }

        /*
         * Check that packet is valid
         */
        if (status & MCF5282_FEC_RxBD_L) {
            /*
             * Pass the packet up the chain.
             * FIXME: Packet filtering hook could be done here.
             */
            struct ether_header *eh;
            int len = rxBd->length - sizeof(uint32_t);

            /*
             * Invalidate the cache and push the packet up.
             * The cache is so small that it's more efficient to just
             * invalidate the whole thing unless the packet is very small.
             */
            m = sc->rxMbuf[rxBdIndex];
            if (len < 128)
                rtems_cache_invalidate_multiple_data_lines(m->m_data, len);
            else
                rtems_cache_invalidate_entire_data();
            m->m_len = m->m_pkthdr.len = len - sizeof(struct ether_header);
            eh = mtod(m, struct ether_header *);
            m->m_data += sizeof(struct ether_header);
            ether_input(ifp, eh, m);

            /*
             * Allocate a new mbuf
             */
            MGETHDR(m, M_WAIT, MT_DATA);
            MCLGET(m, M_WAIT);
            m->m_pkthdr.rcvif = ifp;
            sc->rxMbuf[rxBdIndex] = m;
            rxBd->buffer = mtod(m, void *);
        }

        /*
         * Reenable the buffer descriptor
         */
        rxBd->status = (status & MCF5282_FEC_RxBD_W) | MCF5282_FEC_RxBD_E;
        MCF5282_FEC_RDAR = 0;

        /*
         * Move to next buffer descriptor
         */
        if (++rxBdIndex == sc->rxBdCount)
            rxBdIndex = 0;
    }
}
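The receive path above chooses between invalidating only the packet's lines and invalidating the entire data cache, on the grounds that for a large packet and a small cache the full invalidate is cheaper. A minimal helper capturing that heuristic (the 128-byte threshold is taken from the code above and is board specific):

#include <stddef.h>
#include <rtems.h>

#define SMALL_PACKET_THRESHOLD 128 /* board-specific tuning value */

static void invalidate_received_data(void *buf, size_t len)
{
  if (len < SMALL_PACKET_THRESHOLD) {
    /* Only a few lines are affected: invalidate just the packet. */
    rtems_cache_invalidate_multiple_data_lines(buf, len);
  } else {
    /* The cache is small relative to the packet: drop everything. */
    rtems_cache_invalidate_entire_data();
  }
}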
Example #13
static void test_data_flush_and_invalidate(void)
{
  if (rtems_cache_get_data_line_size() > 0) {
    rtems_interrupt_lock lock;
    rtems_interrupt_lock_context lock_context;
    volatile int *vdata = &data[0];
    int n = 32;
    int i;
    size_t data_size = n * sizeof(data[0]);
    bool write_through;

    printf("data cache flush and invalidate test\n");

    rtems_interrupt_lock_initialize(&lock, "test");
    rtems_interrupt_lock_acquire(&lock, &lock_context);

    for (i = 0; i < n; ++i) {
      vdata[i] = i;
    }

    rtems_cache_flush_multiple_data_lines(&data[0], data_size);

    for (i = 0; i < n; ++i) {
      rtems_test_assert(vdata[i] == i);
    }

    for (i = 0; i < n; ++i) {
      vdata[i] = ~i;
    }

    rtems_cache_invalidate_multiple_data_lines(&data[0], data_size);

    write_through = vdata[0] == ~0;
    if (write_through) {
      for (i = 0; i < n; ++i) {
        rtems_test_assert(vdata[i] == ~i);
      }
    } else {
      for (i = 0; i < n; ++i) {
        rtems_test_assert(vdata[i] == i);
      }
    }

    for (i = 0; i < n; ++i) {
      vdata[i] = ~i;
    }

    rtems_cache_flush_multiple_data_lines(&data[0], data_size);
    rtems_cache_invalidate_multiple_data_lines(&data[0], data_size);

    for (i = 0; i < n; ++i) {
      rtems_test_assert(vdata[i] == ~i);
    }

    rtems_interrupt_lock_release(&lock, &lock_context);
    rtems_interrupt_lock_destroy(&lock);

    printf(
      "data cache operations by line passed the test (%s cache detected)\n",
      write_through ? "write-through" : "copy-back"
    );
  } else {
    printf(
      "skip data cache flush and invalidate test"
        " due to cache line size of zero\n"
    );
  }

  /* Make sure these are nops */
  rtems_cache_flush_multiple_data_lines(NULL, 0);
  rtems_cache_invalidate_multiple_data_lines(NULL, 0);
}
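The data[] array exercised by this test is declared elsewhere. For line-granular flush and invalidate tests it should start on a cache line boundary and span whole lines, otherwise an invalidate may discard unrelated variables that share a line. One possible declaration, assuming the RTEMS_ALIGNED() helper is available and a 64-byte line size (adjust to the target):

#include <rtems.h>

/* Assumed declaration; the test only requires that data[] does not share
 * cache lines with unrelated variables. */
static int data[1024] RTEMS_ALIGNED(64);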
Example #14
static void test_timing(void)
{
  rtems_interrupt_lock lock;
  rtems_interrupt_lock_context lock_context;
  size_t data_size = sizeof(data);
  uint64_t d[3];
  uint32_t cache_level;
  size_t cache_size;

  rtems_interrupt_lock_initialize(&lock, "test");

  printf(
    "data cache line size %zi bytes\n"
    "data cache size %zi bytes\n",
    rtems_cache_get_data_line_size(),
    rtems_cache_get_data_cache_size(0)
  );

  cache_level = 1;
  cache_size = rtems_cache_get_data_cache_size(cache_level);
  while (cache_size > 0) {
    printf(
      "data cache level %" PRIu32 " size %zi bytes\n",
      cache_level,
      cache_size
    );
    ++cache_level;
    cache_size = rtems_cache_get_data_cache_size(cache_level);
  }

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = load();
  d[1] = load();
  rtems_cache_flush_entire_data();
  d[2] = load();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "load %zi bytes with flush entire data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with flushed cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = load();
  d[1] = load();
  rtems_cache_flush_multiple_data_lines(&data[0], sizeof(data));
  d[2] = load();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "load %zi bytes with flush multiple data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with flushed cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = load();
  d[1] = load();
  rtems_cache_invalidate_multiple_data_lines(&data[0], sizeof(data));
  d[2] = load();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "load %zi bytes with invalidate multiple data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with invalidated cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = store();
  d[1] = store();
  rtems_cache_flush_entire_data();
  d[2] = store();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "store %zi bytes with flush entire data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with flushed cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = store();
  d[1] = store();
  rtems_cache_flush_multiple_data_lines(&data[0], sizeof(data));
  d[2] = store();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "store %zi bytes with flush multiple data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with flushed cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = store();
  d[1] = store();
  rtems_cache_invalidate_multiple_data_lines(&data[0], sizeof(data));
  d[2] = store();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "store %zi bytes with invalidate multiple data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with invalidated cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  printf(
    "instruction cache line size %zi bytes\n"
    "instruction cache size %zi bytes\n",
    rtems_cache_get_instruction_line_size(),
    rtems_cache_get_instruction_cache_size(0)
  );

  cache_level = 1;
  cache_size = rtems_cache_get_instruction_cache_size(cache_level);
  while (cache_size > 0) {
    printf(
      "instruction cache level %" PRIu32 " size %zi bytes\n",
      cache_level,
      cache_size
    );
    ++cache_level;
    cache_size = rtems_cache_get_instruction_cache_size(cache_level);
  }

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = do_some_work();
  d[1] = do_some_work();
  rtems_cache_invalidate_entire_instruction();
  d[2] = do_some_work();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "invalidate entire instruction\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with invalidated cache %" PRIu64 " ns\n",
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = do_some_work();
  d[1] = do_some_work();
  rtems_cache_invalidate_multiple_instruction_lines(do_some_work, 4096);
  d[2] = do_some_work();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "invalidate multiple instruction\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with invalidated cache %" PRIu64 " ns\n",
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_destroy(&lock);
}
static void initializeHardware(struct bfin_ethernetSoftc *sc) {
  struct ifnet *ifp;
  struct mbuf *m;
  unsigned char *hwaddr;
  int cacheAlignment;
  int rxStatusSize;
  int txStatusSize;
  char *ptr;
  int i;
  void *ethBase;
  void *rxdmaBase;
  void *txdmaBase;
  uint32_t divisor;

  ifp = &sc->arpcom.ac_if;
  ethBase = sc->ethBase;
  rxdmaBase = sc->rxdmaBase;
  txdmaBase = sc->txdmaBase;

  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_FLC_OFFSET) = 0;
  divisor = (sc->sclk / 25000000) / 2 - 1;
  BFIN_REG32(ethBase, EMAC_SYSCTL_OFFSET) = (divisor <<
                                             EMAC_SYSCTL_MDCDIV_SHIFT) |
                                            EMAC_SYSCTL_RXDWA;
#ifdef BFIN_IPCHECKSUMS
  BFIN_REG32(ethBase, EMAC_SYSCTL_OFFSET) |= EMAC_SYSCTL_RXCKS;
#endif
  BFIN_REG32(ethBase, EMAC_SYSTAT_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_RX_IRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_RX_STKY_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_TX_IRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_TX_STKY_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_RIRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_MMC_RIRQS_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_TIRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_MMC_TIRQS_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_CTL_OFFSET) = EMAC_MMC_CTL_MMCE |
                                             EMAC_MMC_CTL_CCOR |
                                             EMAC_MMC_CTL_RSTC;
  BFIN_REG32(ethBase, EMAC_MMC_CTL_OFFSET) = EMAC_MMC_CTL_MMCE |
                                             EMAC_MMC_CTL_CCOR;

  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_X_COUNT_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_X_COUNT_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_X_MODIFY_OFFSET) = 4;
  BFIN_REG16(txdmaBase, DMA_X_MODIFY_OFFSET) = 4;
  BFIN_REG16(rxdmaBase, DMA_Y_COUNT_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_Y_COUNT_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_Y_MODIFY_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_Y_MODIFY_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) = DMA_IRQ_STATUS_DMA_ERR |
                                                 DMA_IRQ_STATUS_DMA_DONE;

  /* The status structures cannot share cache lines with anything else,
     including other status structures, so we can safely manage both the
     processor and DMA writing to them.  So this rounds up the structure
     sizes to a multiple of the cache line size. */
  cacheAlignment = rtems_cache_get_data_line_size();
  if (cacheAlignment == 0)
     cacheAlignment = 1;
  rxStatusSize = cacheAlignment * ((sizeof(rxStatusT) + cacheAlignment - 1) /
                                   cacheAlignment);
  txStatusSize = cacheAlignment * ((sizeof(txStatusT) + cacheAlignment - 1) /
                                   cacheAlignment);
  /* Allocate enough extra to allow structures to start at cache aligned
     boundary. */
  sc->status = malloc(sc->rxDescCount * rxStatusSize +
                      sc->txDescCount * txStatusSize +
                      cacheAlignment - 1, M_DEVBUF, M_NOWAIT);
  sc->rx = malloc(sc->rxDescCount * sizeof(*sc->rx), M_DEVBUF, M_NOWAIT);
  sc->tx = malloc(sc->txDescCount * sizeof(*sc->tx), M_DEVBUF, M_NOWAIT);
  if (sc->status == NULL || sc->rx == NULL || sc->tx == NULL)
    rtems_panic("No memory!\n");

  /* Start status structures at cache aligned boundary. */
  ptr = (char *) (((intptr_t) sc->status + cacheAlignment - 1) &
                  ~(cacheAlignment - 1));
  memset(ptr, 0, sc->rxDescCount * rxStatusSize +
                 sc->txDescCount * txStatusSize);
  memset(sc->rx, 0, sc->rxDescCount * sizeof(*sc->rx));
  memset(sc->tx, 0, sc->txDescCount * sizeof(*sc->tx));
  rtems_cache_flush_multiple_data_lines(ptr, sc->rxDescCount * rxStatusSize +
                                             sc->txDescCount * txStatusSize);
  for (i = 0; i < sc->rxDescCount; i++) {
    MGETHDR(m, M_WAIT, MT_DATA);
    MCLGET(m, M_WAIT);
    m->m_pkthdr.rcvif = ifp;
    sc->rx[i].m = m;
    /* start dma at 32 bit boundary */
    sc->rx[i].data.addr = (void *) (((intptr_t) m->m_data + 3) & ~3);
    rtems_cache_invalidate_multiple_data_lines(
        sc->rx[i].data.addr,
        BFIN_ETHERNET_MAX_FRAME_LENGTH + 2);
    sc->rx[i].data.dmaConfig = DMA_MODE_RX;
    sc->rx[i].data.next = &(sc->rx[i].status);
    sc->rx[i].status.addr = ptr;
    if (i < sc->rxDescCount - 1) {
      sc->rx[i].status.dmaConfig = DMA_MODE_STATUS;
      sc->rx[i].status.next = &(sc->rx[i + 1].data);
    } else {
      sc->rx[i].status.dmaConfig = DMA_MODE_STATUS_LAST;
      sc->rx[i].status.next = &(sc->rx[0].data);
    }
    ptr += rxStatusSize;
  }
  rtems_cache_flush_multiple_data_lines(sc->rx, sc->rxDescCount *
                                                sizeof(*sc->rx));
  for (i = 0; i < sc->txDescCount; i++) {
    sc->tx[i].data.addr = &sc->tx[i].buffer.packet;
    sc->tx[i].data.dmaConfig = DMA_MODE_TX;
    sc->tx[i].data.next = &(sc->tx[i].status);
    sc->tx[i].status.addr = ptr;
    sc->tx[i].status.dmaConfig = DMA_MODE_STATUS_LAST;
    if (i < sc->txDescCount - 1)
      sc->tx[i].status.next = &(sc->tx[i + 1].data);
    else
      sc->tx[i].status.next = &(sc->tx[0].data);
    sc->tx[i].inUse = false;
    ptr += txStatusSize;
  }
  rtems_cache_flush_multiple_data_lines(sc->tx, sc->txDescCount *
                                                sizeof(*sc->tx));

  BFIN_REG32(rxdmaBase, DMA_NEXT_DESC_PTR_OFFSET) = (uint32_t) &sc->rx[0].data;
  BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) = (uint32_t) &sc->tx[0].data;

  hwaddr = sc->arpcom.ac_enaddr;
  BFIN_REG16(ethBase, EMAC_ADDRHI_OFFSET) = ((uint16_t) hwaddr[5] << 8) |
                                            hwaddr[4];
  BFIN_REG32(ethBase, EMAC_ADDRLO_OFFSET) = ((uint32_t) hwaddr[3] << 24) |
                                            ((uint32_t) hwaddr[2] << 16) |
                                            ((uint32_t) hwaddr[1] << 8) |
                                            hwaddr[0];

  if (sc->acceptBroadcast)
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) &= ~EMAC_OPMODE_DBF;
  else
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_DBF;

}
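initializeHardware() rounds each status structure up to a whole number of cache lines so that no structure shares a line with anything else. The same arithmetic can be factored into a small helper; a minimal sketch (round_up_to_cache_lines is a name introduced here, not part of the driver):

#include <stddef.h>
#include <rtems.h>

/* Round size up to the next multiple of the data cache line size so that a
 * structure of the padded size never shares a cache line with a neighbour. */
static size_t round_up_to_cache_lines(size_t size)
{
  size_t line = rtems_cache_get_data_line_size();

  if (line == 0) {
    line = 1; /* no data cache: no padding needed */
  }

  return line * ((size + line - 1) / line);
}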
static void rxDaemon(void *arg) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  struct mbuf *m;
  struct mbuf *rxPacket;
  void *dataPtr;
  rtems_event_set events;
  struct ether_header *eh;
  rxStatusT *status;
  uint32_t rxStatus;
  int head;
  int prevHead;
  int length;
  void *ethBase;
  void *rxdmaBase;

  sc = (struct bfin_ethernetSoftc *) arg;
  rxdmaBase = sc->rxdmaBase;
  ethBase = sc->ethBase;
  ifp = &sc->arpcom.ac_if;
  prevHead = sc->rxDescCount - 1;
  head = 0;

  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_RE;

  while (1) {
    status = sc->rx[head].status.addr;
    rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    while (status->status != 0) {
      if (status->status & EMAC_RX_STAT_RX_OK) {
        /* get new cluster to replace this one */
        MGETHDR(m, M_WAIT, MT_DATA);
        MCLGET(m, M_WAIT);
        m->m_pkthdr.rcvif = ifp;
      } else
        m = NULL;

      rxStatus = status->status;
      /* update statistics */


      if (m) {
        /* save received packet to send up a little later */
        rxPacket = sc->rx[head].m;
        dataPtr = sc->rx[head].data.addr;

        /* setup dma for new cluster */
        sc->rx[head].m = m;
        sc->rx[head].data.addr = (void *) (((intptr_t) m->m_data + 3) & ~3);
        /* invalidate cache for new data buffer, in case any lines
           are dirty from previous owner */
        rtems_cache_invalidate_multiple_data_lines(
            sc->rx[head].data.addr,
            BFIN_ETHERNET_MAX_FRAME_LENGTH + 2);
      } else
        rxPacket = NULL;

      sc->rx[head].status.dmaConfig = DMA_MODE_STATUS_LAST;
      rtems_cache_flush_multiple_data_lines(&sc->rx[head],
                                            sizeof(sc->rx[head]));

      /* mark descriptor as empty */
      status->status = 0;
      rtems_cache_flush_multiple_data_lines(&status->status,
                                            sizeof(status->status));

      /* allow dma to continue from previous descriptor into this
         one */
      sc->rx[prevHead].status.dmaConfig = DMA_MODE_STATUS;
      rtems_cache_flush_multiple_data_lines(
          &sc->rx[prevHead].status.dmaConfig,
          sizeof(sc->rx[prevHead].status.dmaConfig));

      if (rxPacket) {
        /* send it up */
        eh = (struct ether_header *) ((intptr_t) dataPtr + 2);
        rxPacket->m_data = (caddr_t) ((intptr_t) dataPtr + 2 + 14);
        length = (rxStatus & EMAC_RX_STAT_RX_FRLEN_MASK) >>
                  EMAC_RX_STAT_RX_FRLEN_SHIFT;
        rxPacket->m_len = length - 14;
        rxPacket->m_pkthdr.len = rxPacket->m_len;
        /* invalidate packet buffer cache again (even though it
           was invalidated prior to giving it to dma engine),
           because speculative reads might cause cache lines to
           be filled at any time */
        rtems_cache_invalidate_multiple_data_lines(eh, length);
        ether_input(ifp, eh, rxPacket);
      }

      if (++prevHead == sc->rxDescCount)
        prevHead = 0;
      if (++head == sc->rxDescCount)
        head = 0;
      status = sc->rx[head].status.addr;
      rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    }

    /* if dma stopped before the next descriptor, restart it */
    if ((BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) &
         DMA_IRQ_STATUS_DMA_RUN) == 0 &&
        BFIN_REG32(rxdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
        (uint32_t) &sc->rx[head].data) {
      BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
    }

    rtems_bsdnet_event_receive(INTERRUPT_EVENT, RTEMS_WAIT | RTEMS_EVENT_ANY,
                               RTEMS_NO_TIMEOUT, &events);
  }

}
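The comment in rxDaemon() before ether_input() makes the key point of this driver's receive path: a DMA-filled buffer has to be invalidated again immediately before the CPU reads it, because speculative loads may have refetched lines after the first invalidate. The ordering, condensed into a sketch with hypothetical dma_start_receive()/dma_wait_complete() placeholders:

#include <stddef.h>
#include <rtems.h>

/* Hypothetical DMA primitives, used only to illustrate the ordering. */
extern void dma_start_receive(void *buf, size_t size);
extern size_t dma_wait_complete(void);

static size_t receive_into(void *buf, size_t size)
{
  size_t len;

  /* Drop cached copies before the DMA engine starts writing the buffer. */
  rtems_cache_invalidate_multiple_data_lines(buf, size);

  dma_start_receive(buf, size);
  len = dma_wait_complete();

  /* Invalidate again right before reading: speculative reads may have
   * pulled lines back into the cache while the DMA was running. */
  rtems_cache_invalidate_multiple_data_lines(buf, len);

  return len;
}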