Example #1
File: bspsmp.c Project: eeflores/rtems
static void release_processor(uboot_spin_table *spin_table, uint32_t cpu_index)
{
  const Per_CPU_Control *cpu = _Per_CPU_Get_by_index(cpu_index);

  spin_table->pir = cpu_index;
  spin_table->r3_lower = (uint32_t) cpu->interrupt_stack_high;
  spin_table->addr_upper = 0;
  rtems_cache_flush_multiple_data_lines(spin_table, sizeof(*spin_table));
  ppc_synchronize_data();
  spin_table->addr_lower = (uint32_t) _start_secondary_processor;
  rtems_cache_flush_multiple_data_lines(spin_table, sizeof(*spin_table));
}
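For reference, a sketch of the ePAPR-style U-Boot spin table this code assumes (field names inferred from the accesses above; the authoritative definition lives in the BSP headers). Note the ordering in release_processor(): addr_lower is written last, after a flush and sync, because the secondary processor spins until the entry address becomes non-zero.

typedef struct {
  uint32_t addr_upper;  /* upper half of the 64-bit entry address */
  uint32_t addr_lower;  /* lower half; writing this releases the CPU */
  uint32_t r3_upper;
  uint32_t r3_lower;    /* passed to the secondary processor in r3 */
  uint32_t reserved;
  uint32_t pir;         /* processor ID register value */
} uboot_spin_table;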
Example #2
File: bsprestart.c Project: Fyleo/rtems
void bsp_restart(void *addr)
{
  rtems_interrupt_level level;
  void (*start)(void) = addr;
  #ifdef HAS_UBOOT
    const void *mem_begin = (const void *) bsp_uboot_board_info.bi_memstart;
    size_t mem_size = bsp_uboot_board_info.bi_memsize;
  #else /* HAS_UBOOT */
    const void *mem_begin = bsp_ram_start;
    size_t mem_size = (size_t) bsp_ram_size;
  #endif /* HAS_UBOOT */
  uint32_t hid0;

  rtems_interrupt_disable(level);

  hid0 = PPC_SPECIAL_PURPOSE_REGISTER(HID0);

  if ((hid0 & HID0_DCE) != 0) {
    rtems_cache_flush_multiple_data_lines(mem_begin, mem_size);
  }

  hid0 &= ~(HID0_DCE | HID0_ICE);

  PPC_SET_SPECIAL_PURPOSE_REGISTER(HID0, hid0);

  (*start)();
}
Example #3
/*
 * Device-dependent write routine
 * Interrupt-driven devices:
 *	Begin transmission of as many characters as possible (minimum is 1).
 * Polling devices:
 *	Transmit all characters.
 */
static ssize_t
sccInterruptWrite (int minor, const char *buf, size_t len)
{
  int chan = minor;

  if ((sccPrepTxBd[chan]->status & M8xx_BD_READY) == 0) {
    sccPrepTxBd[chan]->buffer = (char *)buf;
    sccPrepTxBd[chan]->length = len;
    rtems_cache_flush_multiple_data_lines((const void *)buf,len);
    /*
     * clear status, set ready bit
     */
    sccPrepTxBd[chan]->status =
      (sccPrepTxBd[chan]->status
       & M8xx_BD_WRAP)
      | M8xx_BD_READY | M8xx_BD_INTERRUPT;
    if ((sccPrepTxBd[chan]->status & M8xx_BD_WRAP) != 0) {
      sccPrepTxBd[chan] = sccFrstTxBd[chan];
    }
    else {
      sccPrepTxBd[chan]++;
    }
  }
  return 0;
}
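The sccPrepTxBd entries are MPC8xx CPM transmit buffer descriptors. A rough sketch of the layout the status/length/buffer accesses assume (not the authoritative m8xx.h definition):

typedef struct m8xxBufferDescriptor_ {
  volatile uint16_t status;  /* M8xx_BD_READY, M8xx_BD_WRAP, ... */
  volatile uint16_t length;  /* number of bytes to transmit */
  volatile void    *buffer;  /* data pointer handed to the CPM DMA */
} m8xxBufferDescriptor_t;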
Example #4
static ssize_t
sccPollWrite (int minor, const char *buf, size_t len)
{
  static char txBuf[CONS_CHN_CNT][SCC_TXBD_CNT];
  int chan = minor;
  int bd_used;
  size_t retval = len;

  while (len--) {
    while (sccPrepTxBd[chan]->status & M8xx_BD_READY)
      continue;
    bd_used = sccPrepTxBd[chan]-sccFrstTxBd[chan];
    txBuf[chan][bd_used] = *buf++;
    rtems_cache_flush_multiple_data_lines((const void *)&txBuf[chan][bd_used],
                                          sizeof(txBuf[chan][bd_used]));
    sccPrepTxBd[chan]->buffer = &(txBuf[chan][bd_used]);
    sccPrepTxBd[chan]->length = 1;
    sccPrepTxBd[chan]->status =
      (sccPrepTxBd[chan]->status
       & M8xx_BD_WRAP)
      | M8xx_BD_READY;
    if ((sccPrepTxBd[chan]->status & M8xx_BD_WRAP) != 0) {
      sccPrepTxBd[chan] = sccFrstTxBd[chan];
    }
    else {
      sccPrepTxBd[chan]++;
    }
  }
  return retval;
}
Example #5
void bsp_uboot_copy_board_info(const bd_t *src)
{
  bd_t *dst = &bsp_uboot_board_info;

  dst = memcpy(dst, src, sizeof(*dst));
  rtems_cache_flush_multiple_data_lines(dst, sizeof(*dst));
}
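bd_t is U-Boot's board-info structure. Only two of its members are relied on in these examples; roughly (the real struct in U-Boot's u-boot.h has many more fields):

typedef struct bd_info {
  unsigned long bi_memstart;  /* start address of DRAM */
  unsigned long bi_memsize;   /* DRAM size in bytes */
  /* ... many more fields ... */
} bd_t;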
Example #6
File: vc.c Project: AoLaD/rtems
/*
 * When cache is enabled then content of buffer exchanged
 * with VideoCore has to be propagated through ARM11/Cortex-A7
 * caches
 */
static inline void bcm2835_mailbox_buffer_flush_and_invalidate(
  void  *buf,
  size_t size
)
{
  rtems_cache_flush_multiple_data_lines( buf, size );
  rtems_cache_invalidate_multiple_data_lines( buf, size );
}
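A hedged usage sketch: the flush pushes the request buffer out to memory for the VideoCore, and the invalidate discards the CPU's now-stale lines so the reply is re-read from memory. The mailbox helpers and channel constant below are placeholders, not the BSP's actual API:

static uint32_t exchange_buffer_with_videocore(void *buf, size_t size)
{
  bcm2835_mailbox_buffer_flush_and_invalidate(buf, size);
  mailbox_write(MBOX_CHANNEL_PROPERTY, (uintptr_t) buf); /* assumed helper */
  return mailbox_read(MBOX_CHANNEL_PROPERTY);            /* assumed helper */
}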
Example #7
static BSP_START_TEXT_SECTION void mpc55xx_start_load_nocache_section(void)
{
  mpc55xx_start_load_section(
    bsp_section_nocache_begin,
    bsp_section_nocache_load_begin,
    (size_t) bsp_section_nocache_size
  );
  rtems_cache_flush_multiple_data_lines(
    bsp_section_nocache_begin,
    (size_t) bsp_section_nocache_size
  );
}
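mpc55xx_start_load_section is not shown here; presumably it copies the section image from its load address to its runtime address, and the flush afterwards makes the copy visible once the data cache is disabled. A minimal sketch under that assumption:

static BSP_START_TEXT_SECTION void mpc55xx_start_load_section(
  unsigned char *begin,
  const unsigned char *load_begin,
  size_t size
)
{
  /* sketch: plain byte copy, skipped when the section is linked in place */
  if (begin != load_begin) {
    size_t i;

    for (i = 0; i < size; ++i) {
      begin[i] = load_begin[i];
    }
  }
}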
Example #8
/*
 *  TODO: Get a free buffer and set it up.
 */
ssize_t
m8xx_uart_write(
    int minor,
    const char *buf,
    size_t len
)
{
    rtems_cache_flush_multiple_data_lines( buf, len );
    TxBd[minor]->buffer = (char *) buf;
    TxBd[minor]->length = len;
    TxBd[minor]->status = M8xx_BD_READY | M8xx_BD_WRAP | M8xx_BD_INTERRUPT;
    return 0;
}
Example #9
void _ARMV7M_Set_exception_handler(
  int index,
  ARMV7M_Exception_handler handler
)
{
  if ( _ARMV7M_SCB->vtor [index] != handler ) {
    _ARMV7M_SCB->vtor [index] = handler;
    rtems_cache_flush_multiple_data_lines(
      &_ARMV7M_SCB->vtor [index],
      sizeof(_ARMV7M_SCB->vtor [index])
    );
    rtems_cache_invalidate_multiple_instruction_lines(
      &_ARMV7M_SCB->vtor [index],
      sizeof(_ARMV7M_SCB->vtor [index])
    );
  }
}
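A usage sketch for the routine above; the handler name is hypothetical and the vector index is the architectural SVCall slot (exception number 11 in the ARMv7-M vector table):

void my_svc_handler(void); /* hypothetical handler */

static void install_svc_handler(void)
{
  _ARMV7M_Set_exception_handler(11, my_svc_handler);
}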
Example #10
ssize_t
m8xx_uart_pollWrite(
    int minor,
    const char *buf,
    size_t len
)
{
    size_t retval = len;   /* len is consumed by the loop below */

    while (len--) {
        while (TxBd[minor]->status & M8xx_BD_READY)
            continue;
        txBuf[minor] = *buf++;
        rtems_cache_flush_multiple_data_lines( (void *)&txBuf[minor], 1 );
        TxBd[minor]->buffer = &txBuf[minor];
        TxBd[minor]->length = 1;
        TxBd[minor]->status = M8xx_BD_READY | M8xx_BD_WRAP;
    }
    return retval;
}
Example #11
File: ffi.c Project: Boereck/jna
ffi_status
ffi_prep_closure_loc (ffi_closure* closure,
                      ffi_cif* cif,
                      void (*fun)(ffi_cif*,void*,void**,void*),
                      void *user_data,
                      void *codeloc)
{
    if (cif->abi != FFI_SYSV)
        return FFI_BAD_ABI;

    *(unsigned short *)closure->tramp = 0x207c;
    *(void **)(closure->tramp + 2) = codeloc;
    *(unsigned short *)(closure->tramp + 6) = 0x4ef9;

    if (
#ifdef __MINT__
        (cif->rtype->type == FFI_TYPE_LONGDOUBLE) ||
#endif
        (((cif->rtype->type == FFI_TYPE_STRUCT)
          && !cif->flags)))
        *(void **)(closure->tramp + 8) = ffi_closure_struct_SYSV;
    else
        *(void **)(closure->tramp + 8) = ffi_closure_SYSV;

#ifdef __rtems__
    rtems_cache_flush_multiple_data_lines( codeloc, FFI_TRAMPOLINE_SIZE );
#elif defined(__MINT__)
    Ssystem(S_FLUSHCACHE, codeloc, FFI_TRAMPOLINE_SIZE);
#else
    syscall(SYS_cacheflush, codeloc, FLUSH_SCOPE_LINE,
            FLUSH_CACHE_BOTH, FFI_TRAMPOLINE_SIZE);
#endif

    closure->cif  = cif;
    closure->user_data = user_data;
    closure->fun  = fun;

    return FFI_OK;
}
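For reference, my reading of the m68k trampoline assembled above (shown as opcode comments; the cache flush is what makes the freshly written instructions visible to instruction fetch):

/*
 * closure->tramp layout:
 *   offset 0: 0x207c <codeloc>   movea.l #codeloc, %a0  ; closure in a0
 *   offset 6: 0x4ef9 <target>    jmp     target:l       ; enter ffi stub
 */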
Example #12
static void smp_cache_data_flush(void *arg)
{
  smp_cache_area *area = arg;

  rtems_cache_flush_multiple_data_lines(area->addr, area->size);
}
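The argument is presumably a small address/size pair handed to each processor taking part in the flush; a sketch inferred from the accesses above:

typedef struct {
  const void *addr;  /* start of the region to flush */
  size_t size;       /* region size in bytes */
} smp_cache_area;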
Example #13
File: dspi.c Project: AndroidMarv/rtems
/**
 * @brief Writes @a n characters from @a out to bus @a bus and synchronously stores the received data in @a in.
 *
 * eDMA channel usage for transmission:
 * @dot
 * digraph push {
 *  push [label="Push Register"];
 *  push_data [label="Push Data"];
 *  idle_push_data [label="Idle Push Data"];
 *  out [shape=box,label="Output Buffer"];
 *  edge [color=red,fontcolor=red];
 *  push -> idle_push_data [label="Transmit Request",URL="\ref mpc55xx_dspi_bus_entry::edma_transmit"];
 *  push -> out [label="Transmit Request",URL="\ref mpc55xx_dspi_bus_entry::edma_transmit"];
 *  out -> push_data [label="Channel Link",URL="\ref mpc55xx_dspi_bus_entry::edma_push"];
 *  edge [color=blue,fontcolor=blue];
 *  out -> push_data [label="Data"];
 *  push_data -> push [label="Data"];
 *  idle_push_data -> push [label="Data"];
 * }
 * @enddot
 *
 * eDMA channel usage for receiving:
 * @dot
 * digraph pop {
 *  pop [label="Pop Register"];
 *  nirvana [label="Nirvana"];
 *  in [shape=box,label="Input Buffer"];
 *  edge [color=red,fontcolor=red];
 *  pop -> nirvana [label="Receive Request",URL="\ref mpc55xx_dspi_bus_entry::edma_receive"];
 *  pop -> in [label="Receive Request",URL="\ref mpc55xx_dspi_bus_entry::edma_receive"];
 *  edge [color=blue,fontcolor=blue];
 *  pop -> nirvana [label="Data"];
 *  pop -> in [label="Data"];
 * }
 * @enddot
 */
static int mpc55xx_dspi_read_write( rtems_libi2c_bus_t *bus, unsigned char *in, const unsigned char *out, int n)
{
	mpc55xx_dspi_bus_entry *e = (mpc55xx_dspi_bus_entry *) bus;

	/* Non cache aligned characters */
	int n_nc = n;

	/* Cache aligned characters */
	int n_c = 0;

	/* Register addresses */
	volatile void *push = &e->regs->PUSHR.R;
	volatile void *pop = &e->regs->POPR.R;
	volatile union DSPI_SR_tag *status = &e->regs->SR;

	/* Push and pop data */
	union DSPI_PUSHR_tag push_data = e->push_data;
	union DSPI_POPR_tag pop_data;

	/* Status register */
	union DSPI_SR_tag sr;

	/* Read and write indices */
	int r = 0;
	int w = 0;

	if (n == 0) {
		return 0;
	} else if (in == NULL && out == NULL) {
		return -RTEMS_INVALID_ADDRESS;
	}

	if (n > MPC55XX_DSPI_EDMA_MAGIC_SIZE) {
		n_nc = (int) mpc55xx_non_cache_aligned_size( in);
		n_c = (int) mpc55xx_cache_aligned_size( in, (size_t) n);
		if (n_c > EDMA_TCD_BITER_LINKED_SIZE) {
			RTEMS_SYSLOG_WARNING( "buffer size out of range, cannot use eDMA\n");
			n_nc = n;
			n_c = 0;
		} else if (n_nc + n_c != n) {
			RTEMS_SYSLOG_WARNING( "input buffer not proper cache aligned, cannot use eDMA\n");
			n_nc = n;
			n_c = 0;
		}
	}

#ifdef DEBUG
	if (e->regs->SR.B.TXCTR != e->regs->SR.B.RXCTR) {
		RTEMS_SYSLOG_WARNING( "FIFO counter not equal\n");
	}
#endif /* DEBUG */

	/* Direct IO */
	if (out == NULL) {
		push_data.B.TXDATA = e->idle_char;
		while (r < n_nc || w < n_nc) {
			/* Wait for available FIFO */
			do {
				sr.R = status->R;
			} while (sr.B.TXCTR == MPC55XX_DSPI_FIFO_SIZE && sr.B.RXCTR == 0);

			/* Write */
			if (w < n_nc && (w - r) < MPC55XX_DSPI_FIFO_SIZE && sr.B.TXCTR != MPC55XX_DSPI_FIFO_SIZE) {
				++w;
				ppc_write_word( push_data.R, push);
			}

			/* Read */
			if (r < n_nc && sr.B.RXCTR != 0) {
				pop_data.R = ppc_read_word( pop);
				in [r] = (unsigned char) pop_data.B.RXDATA;
				++r;
			}
		}
	} else if (in == NULL) {
		while (r < n_nc || w < n_nc) {
			/* Wait for available FIFO */
			do {
				sr.R = status->R;
			} while (sr.B.TXCTR == MPC55XX_DSPI_FIFO_SIZE && sr.B.RXCTR == 0);

			/* Write */
			if (w < n_nc && (w - r) < MPC55XX_DSPI_FIFO_SIZE && sr.B.TXCTR != MPC55XX_DSPI_FIFO_SIZE) {
				push_data.B.TXDATA = out [w];
				++w;
				ppc_write_word( push_data.R, push);
			}

			/* Read */
			if (r < n_nc && sr.B.RXCTR != 0) {
				pop_data.R = ppc_read_word( pop);
				++r;
			}
		}
	} else {
		while (r < n_nc || w < n_nc) {
			/* Wait for available FIFO */
			do {
				sr.R = status->R;
			} while (sr.B.TXCTR == MPC55XX_DSPI_FIFO_SIZE && sr.B.RXCTR == 0);

			/* Write */
			if (w < n_nc && (w - r) < MPC55XX_DSPI_FIFO_SIZE && sr.B.TXCTR != MPC55XX_DSPI_FIFO_SIZE) {
				push_data.B.TXDATA = out [w];
				++w;
				ppc_write_word( push_data.R, push);
			}

			/* Read */
			if (r < n_nc && sr.B.RXCTR != 0) {
				pop_data.R = ppc_read_word( pop);
				in [r] = (unsigned char) pop_data.B.RXDATA;
				++r;
			}
		}
	}

	/* eDMA transfers */
	if (n_c > 0) {
		rtems_status_code sc = RTEMS_SUCCESSFUL;
		unsigned char *in_c = in + n_nc;
		const unsigned char *out_c = out + n_nc;
		struct tcd_t tcd_transmit = EDMA_TCD_DEFAULT;
		struct tcd_t tcd_receive = EDMA_TCD_DEFAULT;

		/* Cache operations */
		rtems_cache_flush_multiple_data_lines( out_c, (size_t) n_c);
		rtems_cache_invalidate_multiple_data_lines( in_c, (size_t) n_c);

		/* Set transmit TCD */
		if (out == NULL) {
			e->push_data.B.TXDATA = e->idle_char;
			mpc55xx_dspi_store_push_data( e);
			tcd_transmit.SADDR = mpc55xx_dspi_push_data_address( e);
			tcd_transmit.SDF.B.SSIZE = 2;
			tcd_transmit.SDF.B.SOFF = 0;
			tcd_transmit.DADDR = (uint32_t) push;
			tcd_transmit.SDF.B.DSIZE = 2;
			tcd_transmit.CDF.B.DOFF = 0;
			tcd_transmit.NBYTES = 4;
			tcd_transmit.CDF.B.CITER = n_c;
			tcd_transmit.BMF.B.BITER = n_c;
		} else {
			unsigned push_channel = mpc55xx_edma_channel_by_tcd( e->edma_push.edma.edma_tcd);
			mpc55xx_edma_clear_done( e->edma_transmit.edma.edma_tcd);
			tcd_transmit.SADDR = (uint32_t) out_c;
			tcd_transmit.SDF.B.SSIZE = 0;
			tcd_transmit.SDF.B.SOFF = 1;
			tcd_transmit.DADDR = mpc55xx_dspi_push_data_address( e) + 3;
			tcd_transmit.SDF.B.DSIZE = 0;
			tcd_transmit.CDF.B.DOFF = 0;
			tcd_transmit.NBYTES = 1;
			tcd_transmit.CDF.B.CITERE_LINK = 1;
			tcd_transmit.BMF.B.BITERE_LINK = 1;
			tcd_transmit.BMF.B.MAJORLINKCH = push_channel;
			tcd_transmit.CDF.B.CITER = EDMA_TCD_LINK_AND_BITER( push_channel, n_c);
			tcd_transmit.BMF.B.BITER = EDMA_TCD_LINK_AND_BITER( push_channel, n_c);
			tcd_transmit.BMF.B.MAJORE_LINK = 1;
		}
		tcd_transmit.BMF.B.D_REQ = 1;
		tcd_transmit.BMF.B.INT_MAJ = 1;
		*e->edma_transmit.edma.edma_tcd = tcd_transmit;

		/* Set receive TCD */
		if (in == NULL) {
			tcd_receive.CDF.B.DOFF = 0;
			tcd_receive.DADDR = mpc55xx_dspi_nirvana_address( e);
		} else {
			tcd_receive.CDF.B.DOFF = 1;
			tcd_receive.DADDR = (uint32_t) in_c;
		}
		tcd_receive.SADDR = (uint32_t) pop + 3;
		tcd_receive.SDF.B.SSIZE = 0;
		tcd_receive.SDF.B.SOFF = 0;
		tcd_receive.SDF.B.DSIZE = 0;
		tcd_receive.NBYTES = 1;
		tcd_receive.BMF.B.D_REQ = 1;
		tcd_receive.BMF.B.INT_MAJ = 1;
		tcd_receive.CDF.B.CITER = n_c;
		tcd_receive.BMF.B.BITER = n_c;
		*e->edma_receive.edma.edma_tcd = tcd_receive;

		/* Clear request flags */
		sr.R = 0;
		sr.B.TFFF = 1;
		sr.B.RFDF = 1;
		status->R = sr.R;

		/* Enable hardware requests */
		mpc55xx_edma_enable_hardware_requests( e->edma_receive.edma.edma_tcd);
		mpc55xx_edma_enable_hardware_requests( e->edma_transmit.edma.edma_tcd);

		/* Wait for transmit update */
		sc = rtems_semaphore_obtain( e->edma_transmit.id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
		RTEMS_CHECK_SC_RV( sc, "transmit update");

		/* Wait for receive update */
		sc = rtems_semaphore_obtain( e->edma_receive.id, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
		RTEMS_CHECK_SC_RV( sc, "receive update");
	}

	return n;
}
Example #14
File: dspi.c Project: AndroidMarv/rtems
static inline void mpc55xx_dspi_store_push_data( mpc55xx_dspi_bus_entry *e)
{
	mpc55xx_dspi_push_data [e->table_index * 8] = e->push_data.R;
	rtems_cache_flush_multiple_data_lines( &mpc55xx_dspi_push_data [e->table_index * 8], 4);
}
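The stride of 8 uint32_t entries (32 bytes) presumably gives each DSPI bus its own cache line, so flushing one push-data word cannot disturb a neighbouring bus's entry. A sketch of the assumed table (MPC55XX_DSPI_BUS_COUNT and the alignment are assumptions):

/* one 32-byte cache line per bus; only the first word of each is used */
static uint32_t mpc55xx_dspi_push_data [MPC55XX_DSPI_BUS_COUNT * 8]
  __attribute__((aligned(32)));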
Example #15
static void rxDaemon(void *arg) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  struct mbuf *m;
  struct mbuf *rxPacket;
  void *dataPtr;
  rtems_event_set events;
  struct ether_header *eh;
  rxStatusT *status;
  uint32_t rxStatus;
  int head;
  int prevHead;
  int length;
  void *ethBase;
  void *rxdmaBase;

  sc = (struct bfin_ethernetSoftc *) arg;
  rxdmaBase = sc->rxdmaBase;
  ethBase = sc->ethBase;
  ifp = &sc->arpcom.ac_if;
  prevHead = sc->rxDescCount - 1;
  head = 0;

  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_RE;

  while (1) {
    status = sc->rx[head].status.addr;
    rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    while (status->status != 0) {
      if (status->status & EMAC_RX_STAT_RX_OK) {
        /* get new cluster to replace this one */
        MGETHDR(m, M_WAIT, MT_DATA);
        MCLGET(m, M_WAIT);
        m->m_pkthdr.rcvif = ifp;
      } else
        m = NULL;

      rxStatus = status->status;
      /* update statistics */


      if (m) {
        /* save received packet to send up a little later */
        rxPacket = sc->rx[head].m;
        dataPtr = sc->rx[head].data.addr;

        /* setup dma for new cluster */
        sc->rx[head].m = m;
        sc->rx[head].data.addr = (void *) (((intptr_t) m->m_data + 3) & ~3);
        /* invalidate cache for new data buffer, in case any lines
           are dirty from previous owner */
        rtems_cache_invalidate_multiple_data_lines(
            sc->rx[head].data.addr,
            BFIN_ETHERNET_MAX_FRAME_LENGTH + 2);
      } else
        rxPacket = NULL;

      sc->rx[head].status.dmaConfig = DMA_MODE_STATUS_LAST;
      rtems_cache_flush_multiple_data_lines(&sc->rx[head],
                                            sizeof(sc->rx[head]));

      /* mark descriptor as empty */
      status->status = 0;
      rtems_cache_flush_multiple_data_lines(&status->status,
                                            sizeof(status->status));

      /* allow dma to continue from previous descriptor into this
         one */
      sc->rx[prevHead].status.dmaConfig = DMA_MODE_STATUS;
      rtems_cache_flush_multiple_data_lines(
          &sc->rx[prevHead].status.dmaConfig,
          sizeof(sc->rx[prevHead].status.dmaConfig));

      if (rxPacket) {
        /* send it up */
        eh = (struct ether_header *) ((intptr_t) dataPtr + 2);
        rxPacket->m_data = (caddr_t) ((intptr_t) dataPtr + 2 + 14);
        length = (rxStatus & EMAC_RX_STAT_RX_FRLEN_MASK) >>
                  EMAC_RX_STAT_RX_FRLEN_SHIFT;
        rxPacket->m_len = length - 14;
        rxPacket->m_pkthdr.len = rxPacket->m_len;
        /* invalidate packet buffer cache again (even though it
           was invalidated prior to giving it to dma engine),
           because speculative reads might cause cache lines to
           be filled at any time */
        rtems_cache_invalidate_multiple_data_lines(eh, length);
        ether_input(ifp, eh, rxPacket);
      }

      if (++prevHead == sc->rxDescCount)
        prevHead = 0;
      if (++head == sc->rxDescCount)
        head = 0;
      status = sc->rx[head].status.addr;
      rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    }

    /* if dma stopped before the next descriptor, restart it */
    if ((BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) &
         DMA_IRQ_STATUS_DMA_RUN) == 0 &&
        BFIN_REG32(rxdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
        (uint32_t) &sc->rx[head].data) {
      BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
    }

    rtems_bsdnet_event_receive(INTERRUPT_EVENT, RTEMS_WAIT | RTEMS_EVENT_ANY,
                               RTEMS_NO_TIMEOUT, &events);
  }

}
Example #16
static void initializeHardware(struct bfin_ethernetSoftc *sc) {
  struct ifnet *ifp;
  struct mbuf *m;
  unsigned char *hwaddr;
  int cacheAlignment;
  int rxStatusSize;
  int txStatusSize;
  char *ptr;
  int i;
  void *ethBase;
  void *rxdmaBase;
  void *txdmaBase;
  uint32_t divisor;

  ifp = &sc->arpcom.ac_if;
  ethBase = sc->ethBase;
  rxdmaBase = sc->rxdmaBase;
  txdmaBase = sc->txdmaBase;

  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_FLC_OFFSET) = 0;
  divisor = (sc->sclk / 25000000) / 2 - 1;
  BFIN_REG32(ethBase, EMAC_SYSCTL_OFFSET) = (divisor <<
                                             EMAC_SYSCTL_MDCDIV_SHIFT) |
                                            EMAC_SYSCTL_RXDWA;
#ifdef BFIN_IPCHECKSUMS
  BFIN_REG32(ethBase, EMAC_SYSCTL_OFFSET) |= EMAC_SYSCTL_RXCKS;
#endif
  BFIN_REG32(ethBase, EMAC_SYSTAT_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_RX_IRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_RX_STKY_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_TX_IRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_TX_STKY_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_RIRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_MMC_RIRQS_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_TIRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_MMC_TIRQS_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_CTL_OFFSET) = EMAC_MMC_CTL_MMCE |
                                             EMAC_MMC_CTL_CCOR |
                                             EMAC_MMC_CTL_RSTC;
  BFIN_REG32(ethBase, EMAC_MMC_CTL_OFFSET) = EMAC_MMC_CTL_MMCE |
                                             EMAC_MMC_CTL_CCOR;

  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_X_COUNT_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_X_COUNT_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_X_MODIFY_OFFSET) = 4;
  BFIN_REG16(txdmaBase, DMA_X_MODIFY_OFFSET) = 4;
  BFIN_REG16(rxdmaBase, DMA_Y_COUNT_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_Y_COUNT_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_Y_MODIFY_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_Y_MODIFY_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) = DMA_IRQ_STATUS_DMA_ERR |
                                                 DMA_IRQ_STATUS_DMA_DONE;

  /* The status structures cannot share cache lines with anything else,
     including other status structures, so we can safely manage both the
     processor and DMA writing to them.  So this rounds up the structure
     sizes to a multiple of the cache line size. */
  cacheAlignment = rtems_cache_get_data_line_size();
  if (cacheAlignment == 0)
     cacheAlignment = 1;
  rxStatusSize = cacheAlignment * ((sizeof(rxStatusT) + cacheAlignment - 1) /
                                   cacheAlignment);
  txStatusSize = cacheAlignment * ((sizeof(txStatusT) + cacheAlignment - 1) /
                                   cacheAlignment);
  /* Allocate enough extra to allow structures to start at cache aligned
     boundary. */
  sc->status = malloc(sc->rxDescCount * rxStatusSize +
                      sc->txDescCount * txStatusSize +
                      cacheAlignment - 1, M_DEVBUF, M_NOWAIT);
  sc->rx = malloc(sc->rxDescCount * sizeof(*sc->rx), M_DEVBUF, M_NOWAIT);
  sc->tx = malloc(sc->txDescCount * sizeof(*sc->tx), M_DEVBUF, M_NOWAIT);
  if (sc->status == NULL || sc->rx == NULL || sc->tx == NULL)
    rtems_panic("No memory!\n");

  /* Start status structures at cache aligned boundary. */
  ptr = (char *) (((intptr_t) sc->status + cacheAlignment - 1) &
                  ~(cacheAlignment - 1));
  memset(ptr, 0, sc->rxDescCount * rxStatusSize +
                 sc->txDescCount * txStatusSize);
  memset(sc->rx, 0, sc->rxDescCount * sizeof(*sc->rx));
  memset(sc->tx, 0, sc->txDescCount * sizeof(*sc->tx));
  rtems_cache_flush_multiple_data_lines(ptr, sc->rxDescCount * rxStatusSize +
                                             sc->txDescCount * txStatusSize);
  for (i = 0; i < sc->rxDescCount; i++) {
    MGETHDR(m, M_WAIT, MT_DATA);
    MCLGET(m, M_WAIT);
    m->m_pkthdr.rcvif = ifp;
    sc->rx[i].m = m;
    /* start dma at 32 bit boundary */
    sc->rx[i].data.addr = (void *) (((intptr_t) m->m_data + 3) & ~3);
    rtems_cache_invalidate_multiple_data_lines(
        sc->rx[i].data.addr,
        BFIN_ETHERNET_MAX_FRAME_LENGTH + 2);
    sc->rx[i].data.dmaConfig = DMA_MODE_RX;
    sc->rx[i].data.next = &(sc->rx[i].status);
    sc->rx[i].status.addr = ptr;
    if (i < sc->rxDescCount - 1) {
      sc->rx[i].status.dmaConfig = DMA_MODE_STATUS;
      sc->rx[i].status.next = &(sc->rx[i + 1].data);
    } else {
      sc->rx[i].status.dmaConfig = DMA_MODE_STATUS_LAST;
      sc->rx[i].status.next = &(sc->rx[0].data);
    }
    ptr += rxStatusSize;
  }
  rtems_cache_flush_multiple_data_lines(sc->rx, sc->rxDescCount *
                                                sizeof(*sc->rx));
  for (i = 0; i < sc->txDescCount; i++) {
    sc->tx[i].data.addr = &sc->tx[i].buffer.packet;
    sc->tx[i].data.dmaConfig = DMA_MODE_TX;
    sc->tx[i].data.next = &(sc->tx[i].status);
    sc->tx[i].status.addr = ptr;
    sc->tx[i].status.dmaConfig = DMA_MODE_STATUS_LAST;
    if (i < sc->txDescCount - 1)
      sc->tx[i].status.next = &(sc->tx[i + 1].data);
    else
      sc->tx[i].status.next = &(sc->tx[0].data);
    sc->tx[i].inUse = false;
    ptr += txStatusSize;
  }
  rtems_cache_flush_multiple_data_lines(sc->tx, sc->txDescCount *
                                                sizeof(*sc->tx));

  BFIN_REG32(rxdmaBase, DMA_NEXT_DESC_PTR_OFFSET) = (uint32_t) &sc->rx[0].data;
  BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) = (uint32_t) &sc->tx[0].data;

  hwaddr = sc->arpcom.ac_enaddr;
  BFIN_REG16(ethBase, EMAC_ADDRHI_OFFSET) = ((uint16_t) hwaddr[5] << 8) |
                                            hwaddr[4];
  BFIN_REG32(ethBase, EMAC_ADDRLO_OFFSET) = ((uint32_t) hwaddr[3] << 24) |
                                            ((uint32_t) hwaddr[2] << 16) |
                                            ((uint32_t) hwaddr[1] << 8) |
                                            hwaddr[0];

  if (sc->acceptBroadcast)
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) &= ~EMAC_OPMODE_DBF;
  else
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_DBF;

}
Example #17
static void txDaemon(void *arg) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  struct mbuf *m, *first;
  rtems_event_set events;
  void *ethBase;
  void *txdmaBase;
  txStatusT *status;
  int head;
  int prevHead;
  int tail;
  int length;
  char *ptr;

  sc = (struct bfin_ethernetSoftc *) arg;
  ifp = &sc->arpcom.ac_if;

  ethBase = sc->ethBase;
  txdmaBase = sc->txdmaBase;
  head = 0;
  prevHead = sc->txDescCount - 1;
  tail = 0;

  while (1) {
    /* wait for packet or isr */
    rtems_bsdnet_event_receive(START_TRANSMIT_EVENT | INTERRUPT_EVENT,
                               RTEMS_EVENT_ANY | RTEMS_WAIT,
                               RTEMS_NO_TIMEOUT, &events);

    /* if no descriptors are available, try to free one.  To reduce
       transmit latency only do one here. */
    if (sc->tx[head].inUse && txFree(sc, tail)) {
      if (++tail == sc->txDescCount)
        tail = 0;
    }
    /* send packets until the queue is empty or we run out of tx
       descriptors */
    while (!sc->tx[head].inUse && (ifp->if_flags & IFF_OACTIVE)) {
      /* get the next mbuf chain to transmit */
      IF_DEQUEUE(&ifp->if_snd, m);
      if (m != NULL) {
        /* copy packet into our buffer */
        ptr = sc->tx[head].buffer.packet.data;
        length = 0;
        first = m;
        while (m && length <= BFIN_ETHERNET_MAX_FRAME_LENGTH) {
          length += m->m_len;
          if (length <= BFIN_ETHERNET_MAX_FRAME_LENGTH)
            memcpy(ptr, m->m_data, m->m_len);
          ptr += m->m_len;
          m = m->m_next;
        }
        m_freem(first); /* all done with mbuf */
        if (length <= BFIN_ETHERNET_MAX_FRAME_LENGTH) {
          sc->tx[head].buffer.packet.length = length;

          /* setup tx dma */
          status = (txStatusT *) sc->tx[head].status.addr;
          status->status = 0;
          sc->tx[head].inUse = true;
          rtems_cache_flush_multiple_data_lines(status, sizeof(*status));

          /* configure dma to stop after sending this packet */
          sc->tx[head].status.dmaConfig = DMA_MODE_STATUS_LAST;
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[head].status.dmaConfig,
              sizeof(sc->tx[head].status.dmaConfig));
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[head].buffer.packet,
              length + sizeof(uint16_t));

          /* modify previous descriptor to let it continue
             automatically */
          sc->tx[prevHead].status.dmaConfig = DMA_MODE_STATUS;
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[prevHead].status.dmaConfig,
              sizeof(sc->tx[prevHead].status.dmaConfig));

          /* restart dma if it stopped before the packet we just
             added.  this is purely to reduce transmit latency,
             as it would be restarted anyway after this loop (and
             needs to be, as there's a very small chance that the
             dma controller had started the last status transfer
             before the new dmaConfig word was written above and
             is still doing that status transfer when we check the
             status below.  this will be caught by the check
             outside the loop as that is guaranteed to run at least
             once after the last dma complete interrupt. */
          if ((BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) &
               DMA_IRQ_STATUS_DMA_RUN) == 0 &&
               BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
               (uint32_t) sc->tx[head].data.next) {
            BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_TX;
            BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_TE;
          }

          if (++head == sc->txDescCount)
            head = 0;
          if (++prevHead == sc->txDescCount)
            prevHead = 0;

          /* if no descriptors are available, try to free one */
          if (sc->tx[head].inUse && txFree(sc, tail)) {
            if (++tail == sc->txDescCount)
              tail = 0;
          }
        } else {
          /* dropping packet: too large */

        }
      } else {
        /* no packets queued */
        ifp->if_flags &= ~IFF_OACTIVE;
      }
    }

    /* if dma stopped and there's more to do, restart it */
    if ((BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) &
         DMA_IRQ_STATUS_DMA_RUN) == 0 &&
        BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
        (uint32_t) &sc->tx[head].data) {
      BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_TX;
      BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_TE;
    }

    /* free up any additional tx descriptors */
    while (txFree(sc, tail)) {
      if (++tail == sc->txDescCount)
        tail = 0;
    }
  }
}
Example #18
File: init.c Project: ChOr82/RTEMS
static void test_timing(void)
{
  rtems_interrupt_lock lock;
  rtems_interrupt_lock_context lock_context;
  size_t data_size = sizeof(data);
  uint64_t d[3];
  uint32_t cache_level;
  size_t cache_size;

  rtems_interrupt_lock_initialize(&lock, "test");

  printf(
    "data cache line size %zi bytes\n"
    "data cache size %zi bytes\n",
    rtems_cache_get_data_line_size(),
    rtems_cache_get_data_cache_size(0)
  );

  cache_level = 1;
  cache_size = rtems_cache_get_data_cache_size(cache_level);
  while (cache_size > 0) {
    printf(
      "data cache level %" PRIu32 " size %zi bytes\n",
      cache_level,
      cache_size
    );
    ++cache_level;
    cache_size = rtems_cache_get_data_cache_size(cache_level);
  }

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = load();
  d[1] = load();
  rtems_cache_flush_entire_data();
  d[2] = load();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "load %zi bytes with flush entire data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with flushed cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = load();
  d[1] = load();
  rtems_cache_flush_multiple_data_lines(&data[0], sizeof(data));
  d[2] = load();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "load %zi bytes with flush multiple data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with flushed cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = load();
  d[1] = load();
  rtems_cache_invalidate_multiple_data_lines(&data[0], sizeof(data));
  d[2] = load();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "load %zi bytes with invalidate multiple data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with invalidated cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = store();
  d[1] = store();
  rtems_cache_flush_entire_data();
  d[2] = store();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "store %zi bytes with flush entire data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with flushed cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = store();
  d[1] = store();
  rtems_cache_flush_multiple_data_lines(&data[0], sizeof(data));
  d[2] = store();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "store %zi bytes with flush multiple data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with flushed cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = store();
  d[1] = store();
  rtems_cache_invalidate_multiple_data_lines(&data[0], sizeof(data));
  d[2] = store();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "store %zi bytes with invalidate multiple data\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with invalidated cache %" PRIu64 " ns\n",
    data_size,
    d[0],
    d[1],
    d[2]
  );

  printf(
    "instruction cache line size %zi bytes\n"
    "instruction cache size %zi bytes\n",
    rtems_cache_get_instruction_line_size(),
    rtems_cache_get_instruction_cache_size(0)
  );

  cache_level = 1;
  cache_size = rtems_cache_get_instruction_cache_size(cache_level);
  while (cache_size > 0) {
    printf(
      "instruction cache level %" PRIu32 " size %zi bytes\n",
      cache_level,
      cache_size
    );
    ++cache_level;
    cache_size = rtems_cache_get_instruction_cache_size(cache_level);
  }

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = do_some_work();
  d[1] = do_some_work();
  rtems_cache_invalidate_entire_instruction();
  d[2] = do_some_work();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "invalidate entire instruction\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with invalidated cache %" PRIu64 " ns\n",
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_acquire(&lock, &lock_context);

  d[0] = do_some_work();
  d[1] = do_some_work();
  rtems_cache_invalidate_multiple_instruction_lines(do_some_work, 4096);
  d[2] = do_some_work();

  rtems_interrupt_lock_release(&lock, &lock_context);

  printf(
    "invalidate multiple instruction\n"
    "  duration with normal cache %" PRIu64 " ns\n"
    "  duration with warm cache %" PRIu64 " ns\n"
    "  duration with invalidated cache %" PRIu64 " ns\n",
    d[0],
    d[1],
    d[2]
  );

  rtems_interrupt_lock_destroy(&lock);
}
Example #19
File: init.c Project: ChOr82/RTEMS
static void test_data_flush_and_invalidate(void)
{
  if (rtems_cache_get_data_line_size() > 0) {
    rtems_interrupt_lock lock;
    rtems_interrupt_lock_context lock_context;
    volatile int *vdata = &data[0];
    int n = 32;
    int i;
    size_t data_size = n * sizeof(data[0]);
    bool write_through;

    printf("data cache flush and invalidate test\n");

    rtems_interrupt_lock_initialize(&lock, "test");
    rtems_interrupt_lock_acquire(&lock, &lock_context);

    for (i = 0; i < n; ++i) {
      vdata[i] = i;
    }

    rtems_cache_flush_multiple_data_lines(&data[0], data_size);

    for (i = 0; i < n; ++i) {
      rtems_test_assert(vdata[i] == i);
    }

    for (i = 0; i < n; ++i) {
      vdata[i] = ~i;
    }

    rtems_cache_invalidate_multiple_data_lines(&data[0], data_size);

    write_through = vdata[0] == ~0;
    if (write_through) {
      for (i = 0; i < n; ++i) {
        rtems_test_assert(vdata[i] == ~i);
      }
    } else {
      for (i = 0; i < n; ++i) {
        rtems_test_assert(vdata[i] == i);
      }
    }

    for (i = 0; i < n; ++i) {
      vdata[i] = ~i;
    }

    rtems_cache_flush_multiple_data_lines(&data[0], data_size);
    rtems_cache_invalidate_multiple_data_lines(&data[0], data_size);

    for (i = 0; i < n; ++i) {
      rtems_test_assert(vdata[i] == ~i);
    }

    rtems_interrupt_lock_release(&lock, &lock_context);
    rtems_interrupt_lock_destroy(&lock);

    printf(
      "data cache operations by line passed the test (%s cache detected)\n",
      write_through ? "write-through" : "copy-back"
    );
  } else {
    printf(
      "skip data cache flush and invalidate test"
        " due to cache line size of zero\n"
    );
  }

  /* Make sure these are nops */
  rtems_cache_flush_multiple_data_lines(NULL, 0);
  rtems_cache_invalidate_multiple_data_lines(NULL, 0);
}