Example #1
//
// This function is called as a result of the "eth_drv_recv()" call above.
// Its job is to fetch data for a packet from the hardware once
// memory buffers have been allocated for the packet.  Note that the buffers
// may come in pieces, using a scatter-gather list.  This allows for more
// efficient processing in the upper layers of the stack.
//
static void
quicc_eth_recv(struct eth_drv_sc *sc, struct eth_drv_sg *sg_list, int sg_len)
{
    struct quicc_eth_info *qi = (struct quicc_eth_info *)sc->driver_private;
    unsigned char *bp;
    int i, cache_state;
    int sg_list_null_buffer = 0;
    
    bp = (unsigned char *)qi->rxbd->buffer;
    // Note: the MPC8xx does not seem to snoop/invalidate the data cache properly!
    HAL_DCACHE_IS_ENABLED(cache_state);
    if (cache_state) {
        HAL_DCACHE_INVALIDATE(qi->rxbd->buffer, qi->rxbd->length);  // Make sure no stale data
    }
    for (i = 0;  i < sg_len;  i++) {
        if (sg_list[i].buf != 0) {
            memcpy((void *)sg_list[i].buf, bp, sg_list[i].len);
            bp += sg_list[i].len;
        }
        else
            sg_list_null_buffer = 1;
    }

    // A NULL sg_list buffer usually means no mbufs were available, so we
    // don't count it as a delivery; instead we count it as a resource error.
    
    if (!sg_list_null_buffer)
        qi->rx_deliver++;
    else
        qi->rx_resource++;

}
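
//
// Illustrative sketch only (not the actual eCos ethernet layer): roughly how
// an upper layer could build a scatter-gather list and hand it to the recv
// callback above.  The two-segment split, the buffer sizes and the function
// name 'example_deliver_packet' are assumptions made for this example.
//
static void
example_deliver_packet(struct eth_drv_sc *sc, int total_len)
{
    struct eth_drv_sg sg_list[2];
    unsigned char hdr[14];        // e.g. the ethernet header
    unsigned char payload[1500];  // e.g. the rest of the frame

    sg_list[0].buf = (CYG_ADDRESS)hdr;
    sg_list[0].len = sizeof(hdr);
    sg_list[1].buf = (CYG_ADDRESS)payload;
    sg_list[1].len = total_len - sizeof(hdr);

    // The driver copies the packet out of the hardware buffer piece by piece.
    quicc_eth_recv(sc, sg_list, 2);
}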
Example #2
/*
 * Get a character from a port, non-blocking
 * This function can be called on either an SMC or SCC port
 */
static cyg_bool
cyg_hal_sxx_getc_nonblock(void* __ch_data, cyg_uint8* ch)
{
    volatile struct cp_bufdesc *bd;
    EPPC *eppc = eppc_base();
    struct port_info *info = (struct port_info *)__ch_data;
    volatile struct smc_uart_pram *uart_pram = (volatile struct smc_uart_pram *)((char *)eppc + info->pram);
    int cache_state;

    /* rx buffer descriptor */
    bd = info->next_rxbd;

    if (bd->ctrl & QUICC_BD_CTL_Ready)
        return false;

    *ch = bd->buffer[0];

    bd->length = 0;
    bd->buffer[0] = '\0';
    bd->ctrl |= QUICC_BD_CTL_Ready;
    if (bd->ctrl & QUICC_BD_CTL_Wrap) {
        bd = (struct cp_bufdesc *)((char *)eppc + uart_pram->rbase);
    } else {
        bd++;
    }
    info->next_rxbd = bd;

    // Note: the MBX860 does not seem to snoop/invalidate the data cache properly!
    HAL_DCACHE_IS_ENABLED(cache_state);
    if (cache_state) {
        HAL_DCACHE_INVALIDATE(bd->buffer, uart_pram->mrblr);  // Make sure no stale data
    }

    return true;
}
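
/*
 * Illustrative sketch only: a blocking getc can be layered on top of the
 * non-blocking routine above by simply polling it until a character turns
 * up.  The function name is an assumption made for this example.
 */
static cyg_uint8
cyg_hal_sxx_getc_example(void* __ch_data)
{
    cyg_uint8 ch;
    /* Spin until the CPM marks the next Rx buffer descriptor as filled */
    while (!cyg_hal_sxx_getc_nonblock(__ch_data, &ch))
        ;
    return ch;
}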
Example #3
static void
mpc8xxx_scc_serial_DSR(serial_channel *chan)
{
    mpc8xxx_sxx_serial_info *smc_chan = (mpc8xxx_sxx_serial_info *)chan->dev_priv;
    volatile struct scc_regs_8260 *ctl = (volatile struct scc_regs_8260 *)smc_chan->ctl;
    volatile struct cp_bufdesc *txbd;
    volatile struct cp_bufdesc *rxbd = smc_chan->rxbd;
    volatile t_Smc_Pram *pram = (volatile t_Smc_Pram *)smc_chan->pram;
    struct cp_bufdesc *rxlast;
    int i, cache_state;

    if (ctl->scce & SCCE_Tx) {
        // Transmit interrupt
        ctl->scce = SCCE_Tx;  // Reset interrupt state;
        txbd = smc_chan->tbase;  // First buffer
        while (true) {
            if ((txbd->ctrl & (_BD_CTL_Ready|_BD_CTL_Int)) == _BD_CTL_Int) {
                txbd->length = 0;
                txbd->ctrl &= ~_BD_CTL_Int;  // Reset interrupt bit
            }
            if (txbd->ctrl & _BD_CTL_Wrap) {
                txbd = smc_chan->tbase;
                break;
            } else {
                txbd++;
            }
        }
        (chan->callbacks->xmt_char)(chan);
    }
    while (ctl->scce & SCCE_Rx) {
        // Receive interrupt
        ctl->scce = SCCE_Rx;  // Reset interrupt state;
        rxlast = (struct cp_bufdesc *) ((char *)IMM + pram->rbptr);
        while (rxbd != rxlast) {
            if ((rxbd->ctrl & _BD_CTL_Ready) == 0) {
                for (i = 0;  i < rxbd->length;  i++) {
                    (chan->callbacks->rcv_char)(chan, rxbd->buffer[i]);
                }
                // Note: the MBX860 does not seem to snoop/invalidate the data cache properly!
                HAL_DCACHE_IS_ENABLED(cache_state);
                if (cache_state) {
                    HAL_DCACHE_INVALIDATE(rxbd->buffer, smc_chan->rxsize);  // Make sure no stale data
                }
                rxbd->length = 0;
                rxbd->ctrl |= _BD_CTL_Ready;
            }
            if (rxbd->ctrl & _BD_CTL_Wrap) {
                rxbd = smc_chan->rbase;
            } else {
                rxbd++;
            }
        }
        smc_chan->rxbd = (struct cp_bufdesc *)rxbd;
    }
    if (ctl->scce & SCCE_Bsy) {
        ctl->scce = SCCE_Bsy;  // Reset interrupt state;
    }
    cyg_drv_interrupt_acknowledge(smc_chan->int_num);
    cyg_drv_interrupt_unmask(smc_chan->int_num);
}
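
//
// Illustrative sketch only: the ring-walking idiom used by the loops above,
// factored into a small helper.  The cp_bufdesc fields and the _BD_CTL_Wrap
// bit are taken from the code above; the helper name and the 'ring_base'
// parameter are assumptions made for this example.
//
static volatile struct cp_bufdesc *
example_next_bd(volatile struct cp_bufdesc *bd,
                volatile struct cp_bufdesc *ring_base)
{
    // The Wrap bit marks the last descriptor in the ring; continue from the
    // base of the ring when it is set, otherwise step to the next descriptor.
    if (bd->ctrl & _BD_CTL_Wrap)
        return ring_base;
    return bd + 1;
}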
Example #4
static void
fcc_eth_TxEvent(struct eth_drv_sc *sc, int stat)
{
  struct fcc_eth_info *qi = (struct fcc_eth_info *)sc->driver_private;
  struct fcc_bd *txbd;
  int txindex;
#ifndef FCC_BDs_NONCACHED
  int cache_state;
#endif

#ifndef FCC_BDs_NONCACHED
  // Make sure no stale data
  HAL_DCACHE_IS_ENABLED(cache_state);
  if (cache_state) {
    HAL_DCACHE_INVALIDATE(fcc_eth_txring, 
                          8*CYGNUM_DEVS_ETH_POWERPC_FCC_TxNUM);
  }
#endif

  txbd = qi->tnext;
  // Note: TC field is used to indicate the buffer has/had data in it
  while ( (txbd->ctrl & (FCC_BD_Tx_TC | FCC_BD_Tx_Ready)) == FCC_BD_Tx_TC ) {
      if ((txbd->ctrl & FCC_BD_Tx_ERRORS) != 0) {
#if 0
          diag_printf("FCC Tx error BD: %x/%x- ", txbd, txbd->ctrl);
          if ((txbd->ctrl & FCC_BD_Tx_LC) != 0) diag_printf("Late Collision/");
          if ((txbd->ctrl & FCC_BD_Tx_RL) != 0) diag_printf("Retry limit/");
//          if ((txbd->ctrl & FCC_BD_Tx_RC) != 0) diag_printf("Late Collision/");
          if ((txbd->ctrl & FCC_BD_Tx_UN) != 0) diag_printf("Underrun/");
          if ((txbd->ctrl & FCC_BD_Tx_CSL) != 0) diag_printf("Carrier Lost/");
          diag_printf("\n");
#endif
      }

    txindex = ((unsigned long)txbd - (unsigned long)qi->tbase) / sizeof(*txbd);
    (sc->funs->eth_drv->tx_done)(sc, qi->txkey[txindex], 0);
    txbd->ctrl &= ~FCC_BD_Tx_TC;
    if (txbd->ctrl & FCC_BD_Tx_Wrap) {
      txbd = qi->tbase;
    } else {
      txbd++;
    }
  }
  // Remember where we left off
  qi->tnext = (struct fcc_bd *)txbd;

  // Make sure no stale data  
#ifndef FCC_BDs_NONCACHED
  if (cache_state) {
    HAL_DCACHE_FLUSH(fcc_eth_txring, 
                     8*CYGNUM_DEVS_ETH_POWERPC_FCC_TxNUM);
  }
#endif

}
Example #5
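//
// Flush the data cache over the given region so that memory and the cache
// are consistent; if HAL_DCACHE_FLUSH is not available on this HAL, fall
// back to HAL_DCACHE_INVALIDATE instead.
//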
static void
flush_dcache(void *__p, int __nbytes)
{
    CYGARC_HAL_SAVE_GP();
#ifdef HAL_DCACHE_FLUSH
    HAL_DCACHE_FLUSH( __p , __nbytes );
#elif defined(HAL_DCACHE_INVALIDATE)
    HAL_DCACHE_INVALIDATE();
#endif
    CYGARC_HAL_RESTORE_GP();
}
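
//
// Illustrative sketch only: typical use of the helper above.  A driver
// flushes a buffer it has just written before handing it to a DMA engine,
// so that the device sees the data rather than whatever is still sitting
// behind dirty cache lines.  The function name and parameters are
// assumptions made for this example.
//
static void
example_hand_buffer_to_dma(void *buf, int len)
{
    // ... fill 'buf' with the data to be transferred ...
    flush_dcache(buf, len);   // push the dirty cache lines out to memory
    // ... now tell the DMA engine to start reading from 'buf' ...
}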
Example #6
//
// This function is called when a packet has been received.  Its job is
// to prepare to unload the packet from the hardware.  Once the length of
// the packet is known, the upper layer of the driver can be told.  When
// the upper layer is ready to unload the packet, the internal function
// 'fcc_eth_recv' will be called to actually fetch it from the hardware.
//
static void
fcc_eth_RxEvent(struct eth_drv_sc *sc)
{
  struct fcc_eth_info *qi = (struct fcc_eth_info *)sc->driver_private;
  struct fcc_bd *rxbd;
  int cache_state;

  HAL_DCACHE_IS_ENABLED(cache_state);
#ifndef FCC_BDs_NONCACHED
  if (cache_state) {
    HAL_DCACHE_INVALIDATE(fcc_eth_rxring, 
                          8*CYGNUM_DEVS_ETH_POWERPC_FCC_RxNUM);
  }
#endif

  rxbd = qi->rnext;
  while ((rxbd->ctrl & FCC_BD_Rx_Empty) == 0) {
    qi->rxbd = rxbd;  // Save for callback

    // This is the right way of doing it, but dcbi has a bug ...
    //    if (cache_state) {
    //      HAL_DCACHE_INVALIDATE(rxbd->buffer, rxbd->length); 
    //    }
    if ((rxbd->ctrl & FCC_BD_Rx_ERRORS) == 0) {
        (sc->funs->eth_drv->recv)(sc, rxbd->length);
#if 1 // Coherent caches?
        if (cache_state) {
            HAL_DCACHE_FLUSH(rxbd->buffer, rxbd->length); 
        }
#endif
    }
    // Reset control flags to known [empty] state, clearing error bits
    if (rxbd->ctrl & FCC_BD_Rx_Wrap) {
      rxbd->ctrl = FCC_BD_Rx_Empty | FCC_BD_Rx_Int | FCC_BD_Rx_Wrap;
      rxbd = qi->rbase;
    } else {
      rxbd->ctrl = FCC_BD_Rx_Empty | FCC_BD_Rx_Int;
      rxbd++;
    }
  }
  // Remember where we left off
  qi->rnext = (struct fcc_bd *)rxbd;

  // Make sure no stale data
#ifndef FCC_BDs_NONCACHED
  if (cache_state) {
    HAL_DCACHE_FLUSH(fcc_eth_rxring, 
                     8*CYGNUM_DEVS_ETH_POWERPC_FCC_RxNUM);
  }
#endif

}
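
//
// Illustrative sketch only: the cache discipline used throughout these
// drivers, reduced to a pair of helpers.  Before the CPU reads memory that
// the DMA engine has written (Rx buffers, buffer descriptors) the matching
// lines are invalidated; after the CPU writes memory that the DMA engine
// will read, the lines are flushed.  The helper names are assumptions made
// for this example; the HAL_DCACHE_* macros are the ones used above.
//
static void
example_dma_sync_for_cpu(void *buf, int len)
{
  int cache_state;
  HAL_DCACHE_IS_ENABLED(cache_state);
  if (cache_state) {
    HAL_DCACHE_INVALIDATE(buf, len);   // Drop any stale cached copy
  }
}

static void
example_dma_sync_for_device(void *buf, int len)
{
  int cache_state;
  HAL_DCACHE_IS_ENABLED(cache_state);
  if (cache_state) {
    HAL_DCACHE_FLUSH(buf, len);        // Write dirty lines back to memory
  }
}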
Example #7
//
// This function is called when a packet has been received.  Its job is
// to prepare to unload the packet from the hardware.  Once the length of
// the packet is known, the upper layer of the driver can be told.  When
// the upper layer is ready to unload the packet, the internal function
// 'fec_eth_recv' will be called to actually fetch it from the hardware.
//
static void
fec_eth_RxEvent(struct eth_drv_sc *sc)
{
    struct fec_eth_info *qi = (struct fec_eth_info *)sc->driver_private;
    volatile struct fec_bd *rxbd, *rxfirst;
    int cache_state;

    // Note: the MPC860 does not seem to snoop/invalidate the data cache properly!
    HAL_DCACHE_IS_ENABLED(cache_state);
#ifndef FEC_USE_EPPC_BD
    if (cache_state) {
        HAL_DCACHE_INVALIDATE(fec_eth_rxring, sizeof(fec_eth_rxring));  // Make sure no stale data
    }
#endif
    rxbd = rxfirst = qi->rnext;
    while (true) {
        if ((rxbd->ctrl & FEC_BD_Rx_Empty) == 0) {
            qi->rxbd = rxbd;  // Save for callback
            set_led(LED_RxACTIVE);
            (sc->funs->eth_drv->recv)(sc, rxbd->length);
        }
        if (rxbd->ctrl & FEC_BD_Rx_Wrap) {
            rxbd = qi->rbase;
        } else {
            rxbd++;
        }
        if (rxbd == rxfirst) {
            break;
        }
    }
    // Remember where we left off
    qi->rnext = (struct fec_bd *)rxbd;
#ifndef FEC_USE_EPPC_BD
    if (cache_state) {
        HAL_DCACHE_INVALIDATE(fec_eth_rxring, sizeof(fec_eth_rxring));  // Make sure no stale data
    }
#endif
    qi->fec->RxUpdate = 0x0F0F0F0F;  // Any write tells machine to look for work
}
Example #8
//
// This function is called as a result of the "eth_drv_recv()" call above.
// Its job is to fetch data for a packet from the hardware once
// memory buffers have been allocated for the packet.  Note that the buffers
// may come in pieces, using a scatter-gather list.  This allows for more
// efficient processing in the upper layers of the stack.
//
static void tsec_eth_recv(struct eth_drv_sc *sc, struct eth_drv_sg *sg_list,
		int sg_len)
{
	struct tsec_eth_info *qi = (struct tsec_eth_info *) sc->driver_private;
	unsigned char *bp;
	int i;

	bp = (unsigned char *) qi->rxbd->buffer;
#if CACHE()
	int cache_state;
	// Note: the MPC8xx does not seem to snoop/invalidate the data cache properly!
	HAL_DCACHE_IS_ENABLED(cache_state);
	if (cache_state)
	{
		HAL_DCACHE_INVALIDATE(qi->rxbd->buffer, qi->rxbd->length); // Make sure no stale data
	}
#endif
	for (i = 0; i < sg_len; i++)
	{
		if (sg_list[i].buf != 0)
		{
			memcpy((void *) sg_list[i].buf, bp, sg_list[i].len);
			bp += sg_list[i].len;
//			//debug:
//			if(index_in_memory_area + sg_list[i].len < 1024 * 1024 )
//			{
//				stopper ++;
//				memcpy(memory_area + index_in_memory_area, bp, sg_list[i].len);
//				index_in_memory_area += sg_list[i].len;
//				if(stopper == 20)
//					os_printf("break!\n");
//			}
//			else
//				os_printf("break!\n");
		}
	}
	qi->rxbd->ctrl |= FEC_BD_Rx_Empty;
	qi->rxbd->length = 0;
#if CACHE()
	if (cache_state)
	{
		HAL_DCACHE_FLUSH(qi->rxbd, sizeof(*qi->rxbd));
	}
#endif

	//    clear_led(LED_RxACTIVE);
}
Example #9
//
// This function is called when a packet has been received.  Its job is
// to prepare to unload the packet from the hardware.  Once the length of
// the packet is known, the upper layer of the driver can be told.  When
// the upper layer is ready to unload the packet, the internal function
// 'fec_eth_recv' will be called to actually fetch it from the hardware.
//
static void
fec_eth_RxEvent(struct eth_drv_sc *sc)
{
  struct fec_eth_info *qi = (struct fec_eth_info *)sc->driver_private;
  struct fec_bd *rxbd;
  int cache_state;

  HAL_DCACHE_IS_ENABLED(cache_state);
#ifndef FEC_BDs_NONCACHED
  if (cache_state) {
    HAL_DCACHE_INVALIDATE(fec_eth_rxring, 
                          8*CYGNUM_DEVS_ETH_POWERPC_QUICC2_RxNUM);
  }
#endif

  rxbd = qi->rnext;
  while ((rxbd->ctrl & FEC_BD_Rx_Empty) == 0) {
    qi->rxbd = rxbd;  // Save for callback

    // This is the right way of doing it, but dcbi has a bug ...
    //    if (cache_state) {
    //      HAL_DCACHE_INVALIDATE(rxbd->buffer, rxbd->length); 
    //    }
    (sc->funs->eth_drv->recv)(sc, rxbd->length);
    if (cache_state) {
      HAL_DCACHE_FLUSH(rxbd->buffer, rxbd->length); 
    }

    rxbd->ctrl |= FEC_BD_Rx_Empty;
    if (rxbd->ctrl & FEC_BD_Rx_Wrap) {
      rxbd = qi->rbase;
    } else {
      rxbd++;
    }
  }
  // Remember where we left off
  qi->rnext = (struct fec_bd *)rxbd;

  // Make sure no stale data
#ifndef FEC_BDs_NONCACHED
  if (cache_state) {
    HAL_DCACHE_FLUSH(fec_eth_rxring, 
                     8*CYGNUM_DEVS_ETH_POWERPC_QUICC2_RxNUM);
  }
#endif

}
Example #10
//
// This function is called to see if another packet can be sent.
// It should return the number of packets which can be handled.
// Zero should be returned if the interface is busy and cannot send any more.
//
static int tsec_eth_can_send(struct eth_drv_sc *sc)
{
	struct tsec_eth_info *qi = (struct tsec_eth_info *) sc->driver_private;
	volatile struct tsec_bd *txbd;
	txbd = qi->txbd;
#if CACHE()
	int cache_state;
	HAL_DCACHE_IS_ENABLED(cache_state);
	if (cache_state)
	{
		/* avoid any nagging doubt about a txbd crossing a cache line: invalidate each bd individually */
		HAL_DCACHE_INVALIDATE(txbd, sizeof(*txbd));
	}
#endif
    /* FIX!!!! how to do this robustly??? */
	return (txbd->ctrl & FEC_BD_Tx_Ready)==0;
}
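
//
// Illustrative sketch only (not the real eCos ethernet layer): roughly how a
// caller would use the can_send()/send() pair.  The function name and the
// queueing policy described in the comments are assumptions made for this
// example; tsec_eth_send() itself is shown in a later example.
//
static void example_try_transmit(struct eth_drv_sc *sc,
		struct eth_drv_sg *sg_list, int sg_len, int total_len,
		unsigned long key)
{
	if (tsec_eth_can_send(sc))
	{
		// A transmit descriptor is free - hand the packet to the hardware.
		tsec_eth_send(sc, sg_list, sg_len, total_len, key);
	}
	else
	{
		// Interface busy: keep the packet queued and retry after the next
		// transmit-done event.
	}
}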
Example #11
//
// This function is called to see if another packet can be sent.
// It should return the number of packets which can be handled.
// Zero should be returned if the interface is busy and cannot send any more.
//
static int
fec_eth_can_send(struct eth_drv_sc *sc)
{
  struct fec_eth_info *qi = (struct fec_eth_info *)sc->driver_private;
  volatile struct fec_bd *txbd = qi->txbd;
  int cache_state;

  HAL_DCACHE_IS_ENABLED(cache_state);
#ifndef FEC_BDs_NONCACHED
  if (cache_state) {
    HAL_DCACHE_INVALIDATE(fec_eth_txring, 
                          8*CYGNUM_DEVS_ETH_POWERPC_QUICC2_TxNUM);
  }
#endif

  return ((txbd->ctrl & (FCC_BD_Tx_TC | FCC_BD_Tx_Ready)) == 0);
}
Example #12
//
// This function is called when a packet has been received.  Its job is
// to prepare to unload the packet from the hardware.  Once the length of
// the packet is known, the upper layer of the driver can be told.  When
// the upper layer is ready to unload the packet, the internal function
// 'fec_eth_recv' will be called to actually fetch it from the hardware.
//
static void tsec_eth_RxEvent(struct eth_drv_sc *sc)
{
	struct tsec_eth_info *qi = (struct tsec_eth_info *) sc->driver_private;
	volatile struct tsec_bd *rxbd, *rxfirst;

#if CACHE()
	int cache_state;
	HAL_DCACHE_IS_ENABLED(cache_state);
#endif

	rxbd = rxfirst = qi->rnext;
	while (true)
	{
#if CACHE()
		if (cache_state)
		{
			HAL_DCACHE_INVALIDATE(rxbd, sizeof(*rxbd));
		}
#endif
		if ((rxbd->ctrl & FEC_BD_Rx_Empty) == 0)
		{
			qi->rxbd = rxbd; // Save for callback
			//            set_led(LED_RxACTIVE);

			/* cache invalidation happens closer to actual use */
			(sc->funs->eth_drv->recv)(sc, rxbd->length - 4);  // length - 4 removes the CRC
		}
		if (rxbd->ctrl & FEC_BD_Rx_Wrap)
		{
			rxbd = qi->rbase;
		}
		else
		{
			rxbd++;
		}
		if (rxbd == rxfirst)
		{
			break;
		}
	}
	// Remember where we left off
	qi->rnext = (struct tsec_bd *) rxbd;
}
Example #13
//
// This function is called as a result of the "eth_drv_recv()" call above.
// Its job is to fetch data for a packet from the hardware once
// memory buffers have been allocated for the packet.  Note that the buffers
// may come in pieces, using a scatter-gather list.  This allows for more
// efficient processing in the upper layers of the stack.
//
static void
quicc_eth_recv(struct eth_drv_sc *sc, struct eth_drv_sg *sg_list, int sg_len)
{
    struct quicc_eth_info *qi = (struct quicc_eth_info *)sc->driver_private;
    unsigned char *bp;
    int i, cache_state;

    bp = (unsigned char *)qi->rxbd->buffer;
    // Note: the MBX860 does not seem to snoop/invalidate the data cache properly!
    HAL_DCACHE_IS_ENABLED(cache_state);
    if (cache_state) {
        HAL_DCACHE_INVALIDATE(qi->rxbd->buffer, qi->rxbd->length);  // Make sure no stale data
    }
    for (i = 0;  i < sg_len;  i++) {
        if (sg_list[i].buf != 0) {
            memcpy((void *)sg_list[i].buf, bp, sg_list[i].len);
            bp += sg_list[i].len;
        }
    }
}
Example #14
/* check *all* buffer descriptors */
static void tsec_eth_TxEvent(struct eth_drv_sc *sc)
{
	struct tsec_eth_info *qi = (struct tsec_eth_info *) sc->driver_private;
	volatile struct tsec_bd *txbd;
	int key, txindex;

	// Make sure no stale data
#if CACHE()
	int cache_state;
	HAL_DCACHE_IS_ENABLED(cache_state);
#endif

	txbd = qi->tbase;
	for (;;)
	{
#if CACHE()
		if (cache_state)
		{
			HAL_DCACHE_INVALIDATE(txbd, sizeof(*txbd));
		}
#endif
		// Note: TC field is used to indicate the buffer has/had data in it
		int wrap=(txbd->ctrl & FEC_BD_Tx_Wrap);
		if ((txbd->ctrl & (FEC_BD_Tx_Ready)) == 0)
		{
			txindex = ((unsigned long) txbd - (unsigned long) qi->tbase)
					/ sizeof(*txbd);

			if ((key = qi->txkey[txindex]) != 0)
			{
				qi->txkey[txindex] = 0;
				(sc->funs->eth_drv->tx_done)(sc, key, 0);
			}
		}
		if (wrap)
		{
			break;
		}
		txbd++;
	}
}
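
//
// Illustrative sketch only: the index arithmetic used above to recover the
// 'key' stashed for a packet.  Descriptors live in one contiguous ring, so
// the index of a descriptor is simply the pointer difference divided by the
// descriptor size.  The helper name and parameters are assumptions made for
// this example.
//
static int example_bd_index(volatile struct tsec_bd *bd,
		volatile struct tsec_bd *ring_base)
{
	// Descriptors are laid out contiguously, so the index is just the
	// pointer difference divided by the descriptor size.
	return ((unsigned long) bd - (unsigned long) ring_base) / sizeof(*bd);
}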
Example #15
static void
fec_eth_TxEvent(struct eth_drv_sc *sc, int stat)
{
  struct fec_eth_info *qi = (struct fec_eth_info *)sc->driver_private;
  struct fec_bd *txbd;
  int txindex, cache_state;

  // Make sure no stale data
  HAL_DCACHE_IS_ENABLED(cache_state);
#ifndef FEC_BDs_NONCACHED
  if (cache_state) {
    HAL_DCACHE_INVALIDATE(fec_eth_txring, 
                          8*CYGNUM_DEVS_ETH_POWERPC_QUICC2_TxNUM);
  }
#endif

  txbd = qi->tnext;
  // Note: TC field is used to indicate the buffer has/had data in it
  while ( (txbd->ctrl & (FEC_BD_Tx_TC | FEC_BD_Tx_Ready)) == FEC_BD_Tx_TC ) {
    txindex = ((unsigned long)txbd - (unsigned long)qi->tbase) / sizeof(*txbd);
    (sc->funs->eth_drv->tx_done)(sc, qi->txkey[txindex], 0);
    txbd->ctrl &= ~FEC_BD_Tx_TC;
    if (txbd->ctrl & FEC_BD_Tx_Wrap) {
      txbd = qi->tbase;
    } else {
      txbd++;
    }
  }
  // Remember where we left off
  qi->tnext = (struct fec_bd *)txbd;

  // Make sure no stale data  
#ifndef FEC_BDs_NONCACHED
  if (cache_state) {
    HAL_DCACHE_FLUSH(fec_eth_txring, 
                     8*CYGNUM_DEVS_ETH_POWERPC_QUICC2_TxNUM);
  }
#endif

}
Example #16
// Serial I/O - high level interrupt handler (DSR)
static void
quicc_smc_serial_DSR(serial_channel *chan)
{
    quicc_sxx_serial_info *smc_chan = (quicc_sxx_serial_info *)chan->dev_priv;
    volatile struct smc_regs *ctl = (volatile struct smc_regs *)smc_chan->ctl;
    volatile struct cp_bufdesc *txbd;
    volatile struct cp_bufdesc *rxbd = smc_chan->rxbd;
    volatile struct smc_uart_pram *pram = (volatile struct smc_uart_pram *)smc_chan->pram;
    struct cp_bufdesc *rxlast;
    int i, cache_state;

    if (ctl->smc_smce & QUICC_SMCE_TX) {
        // Transmit interrupt
        ctl->smc_smce = QUICC_SMCE_TX;  // Reset interrupt state;
        txbd = smc_chan->tbase;  // First buffer
        while (true) {
            if ((txbd->ctrl & (QUICC_BD_CTL_Ready|QUICC_BD_CTL_Int)) == QUICC_BD_CTL_Int) {
                txbd->length = 0;
                txbd->ctrl &= ~QUICC_BD_CTL_Int;  // Reset interrupt bit
            }
            if (txbd->ctrl & QUICC_BD_CTL_Wrap) {
                txbd = smc_chan->tbase;
                break;
            } else {
                txbd++;
            }
        }
        (chan->callbacks->xmt_char)(chan);
    }
    while (ctl->smc_smce & QUICC_SMCE_RX) {
        // Receive interrupt
        ctl->smc_smce = QUICC_SMCE_RX;  // Reset interrupt state;
        rxlast = (struct cp_bufdesc *) ((char *)eppc_base() + pram->rbptr);
        while (rxbd != rxlast) {
            if ((rxbd->ctrl & QUICC_BD_CTL_Ready) == 0) {
                if((rxbd->ctrl & (QUICC_BD_CTL_Frame | QUICC_BD_CTL_Parity)) == 0) {
                    for (i = 0;  i < rxbd->length;  i++) {
                        (chan->callbacks->rcv_char)(chan, rxbd->buffer[i]);
                    }
                } else {
                    // is this necessary?
                    rxbd->ctrl &= QUICC_BD_CTL_MASK;
                    // should we report the error?
                }
                // Note: the MBX860 does not seem to snoop/invalidate the data cache properly!
                HAL_DCACHE_IS_ENABLED(cache_state);
                if (cache_state) {
                    HAL_DCACHE_INVALIDATE(rxbd->buffer, smc_chan->rxsize);  // Make sure no stale data
                }
                rxbd->length = 0;
                rxbd->ctrl |= QUICC_BD_CTL_Ready;
            }
            if (rxbd->ctrl & QUICC_BD_CTL_Wrap) {
                rxbd = smc_chan->rbase;
            } else {
                rxbd++;
            }
        }
        smc_chan->rxbd = (struct cp_bufdesc *)rxbd;
    }
    if (ctl->smc_smce & QUICC_SMCE_BSY) {
        ctl->smc_smce = QUICC_SMCE_BSY;  // Reset interrupt state;
    }
    cyg_drv_interrupt_acknowledge(smc_chan->int_num);
    cyg_drv_interrupt_unmask(smc_chan->int_num);
}
Example #17
//
// This routine is called to send data to the hardware.
static void 
fcc_eth_send(struct eth_drv_sc *sc, struct eth_drv_sg *sg_list, int sg_len, 
             int total_len, unsigned long key)
{
  struct fcc_eth_info *qi = (struct fcc_eth_info *)sc->driver_private;
  struct fcc_bd *txbd, *txfirst;
  volatile char *bp;
  int i, txindex;
  int cache_state;    

  HAL_DCACHE_IS_ENABLED(cache_state);
#ifndef FCC_BDs_NONCACHED
  if (cache_state) {
    HAL_DCACHE_INVALIDATE(fcc_eth_txring, 
                          8*CYGNUM_DEVS_ETH_POWERPC_FCC_TxNUM);
  }
#endif
 
  // Find a free buffer
  txbd = txfirst = qi->txbd;
  while (txbd->ctrl & FCC_BD_Tx_Ready) {
    // This buffer is busy, move to next one
    if (txbd->ctrl & FCC_BD_Tx_Wrap) {
      txbd = qi->tbase;
    } else {
      txbd++;
    }
    if (txbd == txfirst) {
#ifdef CYGPKG_NET
      panic ("No free xmit buffers");
#else
      os_printf("FCC Ethernet: No free xmit buffers\n");
#endif
    }
  }

  // Remember the next buffer to try
  if (txbd->ctrl & FCC_BD_Tx_Wrap) {
    qi->txbd = qi->tbase;
  } else {
    qi->txbd = txbd+1;
  }

  txindex = ((unsigned long)txbd - (unsigned long)qi->tbase) / sizeof(*txbd);
  qi->txkey[txindex] = key;

  // Set up buffer
  txbd->length = total_len;
  bp = txbd->buffer;
  for (i = 0;  i < sg_len;  i++) {
    memcpy((void *)bp, (void *)sg_list[i].buf, sg_list[i].len);
    bp += sg_list[i].len;
  }

  // Make sure no stale data buffer ...
  if (cache_state) {
    HAL_DCACHE_FLUSH(txbd->buffer, txbd->length);
  }

  // Send it on its way
  txbd->ctrl |= FCC_BD_Tx_Ready | FCC_BD_Tx_Last | FCC_BD_Tx_TC;

#ifndef FCC_BDs_NONCACHED
  if (cache_state) {
    HAL_DCACHE_FLUSH(fcc_eth_txring, 
                     8*CYGNUM_DEVS_ETH_POWERPC_FCC_TxNUM);  
  }
#endif  

}
Example #18
static void spi_transaction_do (cyg_spi_device* device, cyg_bool tick_only,
                                 cyg_bool polled, cyg_uint32 count,
                                 const cyg_uint8* tx_data, cyg_uint8* rx_data,
                                 cyg_bool drop_cs)
{
    cyg_spi_freescale_dspi_bus_t* dspi_bus =
          (cyg_spi_freescale_dspi_bus_t*) device->spi_bus;
    cyg_spi_freescale_dspi_device_t* dspi_device =
          (cyg_spi_freescale_dspi_device_t*) device;
    cyg_bool bus_16bit = dspi_device->clocking.bus_16bit;
    cyghwr_devs_freescale_dspi_t* dspi_p = dspi_bus->setup_p->dspi_p;

    cyghwr_hal_freescale_dma_set_t* dma_set_p;
    cyghwr_hal_freescale_edma_t* edma_p = NULL;

    cyg_uint32 count_down;
    cyg_uint32 txfifo_n = dspi_bus->txfifo_n;
    cyg_uint32 pushr;
    cyg_uint32 pushque_n;
    cyg_uint32 dma_chan_rx_i = 0;
    cyg_uint32 dma_chan_tx_i = 0;

#if DEBUG_SPI >= 2
    cyg_uint32 first_turn = 1;
#endif

    DEBUG2_PRINTF("DSPI: transaction: count=%d drop_cs=%d\n", count, drop_cs);

    // Set up peripheral CS field. DSPI automatically asserts and deasserts CS
    pushr = dspi_chip_select_set(tick_only ? -1 : dspi_device->dev_num,
                                dspi_p->mcr & FREESCALE_DSPI_MCR_PCSSE_M, true);
    pushr |= FREESCALE_DSPI_PUSHR_CONT_M;

    dspi_fifo_clear(dspi_p);
    dspi_fifo_drain(dspi_p);

    pushque_n = dspi_bus->pushque_n;
    if(bus_16bit)
        txfifo_n *= 2;

    if((dma_set_p=dspi_bus->setup_p->dma_set_p)) {
        edma_p = dma_set_p->edma_p;
        // Set up the DMA channels.
        dma_chan_rx_i = SPI_DMA_CHAN_I(dma_set_p, RX);
        dma_chan_tx_i = SPI_DMA_CHAN_I(dma_set_p, TX);
        rx_dma_channel_setup(dma_set_p, (cyg_uint8*) rx_data,
                             bus_16bit, &edma_p->tcd[dma_chan_rx_i]);
        hal_freescale_edma_erq_enable(edma_p, dma_chan_rx_i);
    }

    if(!polled)
        cyg_drv_interrupt_unmask(dspi_bus->setup_p->intr_num);
    count_down = count;
    while(count_down) {
#if DEBUG_SPI >= 2
        if(first_turn) {
            if(dspi_bus->pushque_p)
                dspi_bus->pushque_p[0] |= FREESCALE_DSPI_PUSHR_CTCNT_M;
            first_turn = 0;
        }
#endif
        if(dma_set_p && (count_down > txfifo_n)) {
            // Transfer size is larger than DSPI FIFO
            // Use DMA Tx
            count_down = tx_dma_channel_setup(dspi_bus, (cyg_uint8*) tx_data,
                                              count_down, bus_16bit,
                                              pushr, drop_cs);
#if DEBUG_SPI >= 3
            hal_freescale_edma_transfer_diag(edma_p, dma_chan_rx_i, true);
#endif
            // Enable the Tx DMA / SPI controller.
            hal_freescale_edma_erq_enable(edma_p, dma_chan_tx_i);
            DSPI_EOQ_CLEAR(dspi_p);
        } else {
            // Transfer size fits within DSPI FIFO
            // No need for DMA Tx
            DSPI_EOQ_CLEAR(dspi_p);
            count_down = fifo_pushque_fill(dspi_bus, (cyg_uint8*) tx_data,
                                           count_down, bus_16bit,
                                           pushr, drop_cs);
#if DEBUG_SPI >= 3
            cyghwr_devs_freescale_dspi_diag(dspi_bus);
#endif
        }

        if(polled) {
            DEBUG2_PRINTF("DSPI Polled:\n");
            // Busy-wait for DSPI/DMA (polling for completion).
            while(!(dspi_p->sr & FREESCALE_DSPI_SR_EOQF_M));

            if(dma_set_p) // Disable the Tx DMA channel on completion.
                hal_freescale_edma_erq_disable(edma_p, dma_chan_tx_i);
        } else {
            // Wait for DSPI/DMA completion. (interrupt driven).
            cyg_drv_mutex_lock(&dspi_bus->transfer_mutex);
            cyg_drv_dsr_lock();

            DSPI_IRQ_ENABLE(dspi_p);
            DEBUG2_PRINTF("DSPI IRQ: Enabled\n");

            // Sit back and wait for the ISR/DSRs to signal completion.
            cyg_drv_cond_wait (&dspi_bus->transfer_done);

            cyg_drv_dsr_unlock();
            cyg_drv_mutex_unlock(&dspi_bus->transfer_mutex);
        }

        if(dma_set_p) {
            // Make sure that Rx has been drained by DMA.
            if(rx_data)
                while((dspi_p->sr & FREESCALE_DSPI_SR_RFDF_M));
        } else {
            // No DMA - "manually" drain Rx FIFO
            DEBUG2_PRINTF("DSPI FIFO: 'Manually' drain Rx fifo\n");
#if DEBUG_SPI >= 3
            cyghwr_devs_freescale_dspi_diag(dspi_bus);
#endif
            if(rx_data) {
                if(bus_16bit) {
                    cyg_uint16* rx_data16 = (cyg_uint16*) rx_data;
                    while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M)
                        *rx_data16++ = dspi_p->popr;
                    rx_data = (cyg_uint8*) rx_data16;
                } else {
                    while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M)
                        *rx_data++ = dspi_p->popr;
                }
            } else {
                dspi_fifo_drain(dspi_p);
            }
        }
        dspi_fifo_clear(dspi_p);
        // Prepare for next iteration
        if(tx_data) {
            tx_data += pushque_n;
            if(bus_16bit)
                tx_data += pushque_n;
        }
    }
    if(dma_set_p && rx_data) {
        // Rx buffer may be out of sync with cache.
        DEBUG2_PRINTF("DSPI DMA: Invalidate cache\n");
        HAL_DCACHE_INVALIDATE(rx_data, count);
        DEBUG2_PRINTF("DSPI DMA: Cache invalidated\n");
    }
    if(!polled)
        cyg_drv_interrupt_mask(dspi_bus->setup_p->intr_num);

    dspi_device->chip_sel = !drop_cs;
}
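
//
// Illustrative sketch only: how a transfer normally reaches the routine
// above, via the generic eCos SPI API (cyg_spi_transfer() from
// <cyg/io/spi.h>).  The device instance 'spi_dev', the buffer contents and
// the buffer sizes are assumptions made for this example.
//
static void spi_transfer_example(cyg_spi_device* spi_dev)
{
    cyg_uint8 tx_buf[16] = { 0x9F };   // command byte followed by padding
    cyg_uint8 rx_buf[16];

    // Interrupt-driven (polled == false) full-duplex transfer; when the
    // receive side is handled by DMA, the driver above invalidates the
    // cache over the Rx buffer once the transfer has completed.
    cyg_spi_transfer(spi_dev, false, sizeof(tx_buf), tx_buf, rx_buf);
}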
Example #19
//
// This routine is called to send data to the hardware.
static void tsec_eth_send(struct eth_drv_sc *sc, struct eth_drv_sg *sg_list,
		int sg_len, int total_len, unsigned long key)
{
	struct tsec_eth_info *qi = (struct tsec_eth_info *) sc->driver_private;
	volatile struct tsec_bd *txbd;
	volatile unsigned char *bp;
	int i, txindex;

	volatile struct mpq_tsec *tsec =
			(volatile struct mpq_tsec *) ((unsigned char *) CYGARC_IMM_BASE
					+ CYGARC_REG_IMM_TSEC1);

#if CACHE()
	int cache_state;
	HAL_DCACHE_IS_ENABLED(cache_state);
#endif


    /* if the next buffer isn't free, we're broken */
	txbd = qi->txbd;

//	while ((tsec->tstat & TSTAT_THLT)==0)
//	{
//		/* wait for ring to halt to be able to robustly read the tbptr */
//	}


//	if(qi->txbd != tsec->tbptr)
//	{
//		printf("wtf\n");
//	}

//	trust the software copy rather than setting it to the value pointed at by the HW
//	txbd = tsec->tbptr; // why do we fail to keep track of txbd in software???  :-)

#if CACHE()
	if (cache_state)
	{
		/* avoid any nagging doubt about a txbd crossing a cache line: invalidate each bd individually */
		HAL_DCACHE_INVALIDATE(txbd, sizeof(*txbd));
	}
#endif

	CYG_ASSERT((txbd->ctrl & FEC_BD_Tx_Ready)==0, "TX buffer not ready when it was expected to be");

	// Set up buffer
	bp = txbd->buffer;
	for (i = 0; i < sg_len; i++)
	{
		memcpy((void *) bp, (void *) sg_list[i].buf, sg_list[i].len);
		bp += sg_list[i].len;
	}
	txbd->length = total_len;
	txindex = ((unsigned long) txbd - (unsigned long) qi->tbase)
			/ sizeof(*txbd);
	qi->txkey[txindex] = key;
#if CACHE()
	// Note: the MPC8xx does not seem to snoop/invalidate the data cache properly!
	HAL_DCACHE_IS_ENABLED(cache_state);
	if (cache_state)
	{
		HAL_DCACHE_FLUSH(txbd->buffer, txbd->length); // Make sure no stale data
	}
#endif
	txbd->ctrl |= FEC_BD_Tx_Ready | FEC_BD_Tx_Last | FEC_BD_Tx_TC;

#if CACHE()
	if (cache_state)
	{
		/* and off it goes to the hardware! */
		HAL_DCACHE_FLUSH(txbd, sizeof(*txbd));
	}
#endif


	/* clear halt condition, send it on its way */
	tsec->tstat = TSTAT_THLT;

	/* for debug purposes we wait for the frame to be on its way */
//	for (;;)
//	{
//#if CACHE()
//		if (cache_state)
//		{
//			/* and off it goes to the hardware! */
//			HAL_DCACHE_FLUSH(txbd, sizeof(*txbd));;
//		}
//#endif
//		if ((txbd->ctrl&FEC_BD_Tx_Ready)==0)
//		{
//			/* it's been sent */
//			break;
//		}
//	}


	// Remember the next buffer to try
	if (txbd->ctrl & FEC_BD_Tx_Wrap)
	{
		qi->txbd = qi->tbase;
	}
	else
	{
		qi->txbd = txbd + 1;
	}
#if CACHE()
	HAL_DCACHE_FLUSH(qi, sizeof(*qi));
#endif
}
Example #20
static void spi_transaction_do (cyg_spi_device* device, cyg_bool tick_only,
                                 cyg_bool polled, cyg_uint32 count,
                                 const cyg_uint8* tx_data, cyg_uint8* rx_data,
                                 cyg_bool drop_cs)
{
    cyg_spi_freescale_dspi_bus_t* dspi_bus =
          (cyg_spi_freescale_dspi_bus_t*) device->spi_bus;
    cyg_spi_freescale_dspi_device_t* dspi_device =
          (cyg_spi_freescale_dspi_device_t*) device;
    cyg_bool bus_16bit = dspi_device->clocking.bus_16bit;
    cyghwr_devs_freescale_dspi_t* dspi_p = dspi_bus->setup_p->dspi_p;

    cyghwr_hal_freescale_dma_set_t* dma_set_p;
    cyghwr_hal_freescale_edma_t* edma_p = NULL;

    cyg_uint32 count_down;
    cyg_uint32 txfifo_n = dspi_bus->txfifo_n;
    cyg_uint32 pushr;
    cyg_uint32 pushque_n;
    cyg_uint32 dma_chan_rx_i = 0;
    cyg_uint32 dma_chan_tx_i = 0;
    cyg_uint8* rx_data0;

#if DEBUG_SPI >= 2
    cyg_uint32 first_turn = 1;
#endif

    DEBUG2_PRINTF("DSPI: transaction: count=%d drop_cs=%d tick_only=%d\n",
                  count, drop_cs, tick_only);

    // Set up peripheral CS field. DSPI automatically asserts and deasserts CS
    pushr =
#ifndef CYGOPT_DEVS_SPI_FREESCALE_DSPI_TICK_ONLY_DROPS_CS
          // Compatibility option
          // eCos Reference Manual states that CS should drop prior to sending
          // ticks, but other SPI drivers do not touch the CS.
          tick_only ? dspi_p->pushr & 0x87FF0000 :
#endif
          dspi_chip_select_set(
#ifdef CYGOPT_DEVS_SPI_FREESCALE_DSPI_TICK_ONLY_DROPS_CS
                               // Compatibility option. See comment above.
                                 tick_only ? -1 :
#endif
                                 dspi_device->dev_num,
                                 dspi_p->mcr & FREESCALE_DSPI_MCR_PCSSE_M, true);
    pushr |= FREESCALE_DSPI_PUSHR_CONT_M;

    dspi_fifo_clear(dspi_p);

    pushque_n = dspi_bus->pushque_n;
    if(bus_16bit)
        txfifo_n *= 2;

    dma_set_p = dspi_bus->setup_p->dma_set_p;
    if((count > txfifo_n) && dma_set_p) {
        rx_data0 = rx_data;
        edma_p = dma_set_p->edma_p;
        // Set up the DMA channels.
        dma_chan_rx_i = SPI_DMA_CHAN_I(dma_set_p, RX);
        dma_chan_tx_i = SPI_DMA_CHAN_I(dma_set_p, TX);
        rx_dma_channel_setup(dma_set_p, (cyg_uint8*) rx_data,
                             bus_16bit, &edma_p->tcd[dma_chan_rx_i]);
        hal_freescale_edma_erq_enable(edma_p, dma_chan_rx_i);
        dspi_irq_enable(dspi_p,
                        FREESCALE_DSPI_RSER_TFFF_RE_M   |
                        FREESCALE_DSPI_RSER_RFDF_RE_M   |
                        FREESCALE_DSPI_RSER_TFFF_DIRS_M |
                        FREESCALE_DSPI_RSER_RFDF_DIRS_M);
    } else {
        rx_data0 = NULL;
        // If byte count fits in the FIFO don't bother with DMA.
        if(dma_set_p) {
            edma_p = dma_set_p->edma_p;
            hal_freescale_edma_erq_disable(edma_p, SPI_DMA_CHAN_I(dma_set_p, RX));
        }
        dma_set_p = NULL;
        dspi_irq_disable(dspi_p,
                         FREESCALE_DSPI_RSER_TFFF_RE_M   |
                         FREESCALE_DSPI_RSER_RFDF_RE_M   |
                         FREESCALE_DSPI_RSER_TFFF_DIRS_M |
                         FREESCALE_DSPI_RSER_RFDF_DIRS_M);
    }

    if(!polled)
        cyg_drv_interrupt_unmask(dspi_bus->setup_p->intr_num);
    count_down = count;
    while(count_down) {
#if DEBUG_SPI >= 2
        if(first_turn) {
            if(dspi_bus->pushque_p)
                dspi_bus->pushque_p[0] |= FREESCALE_DSPI_PUSHR_CTCNT_M;
            first_turn = 0;
        }
#endif
        if(dma_set_p && (count_down > txfifo_n)) {
            // Transfer size is larger than DSPI FIFO
            // Use DMA Tx
            count_down = tx_dma_channel_setup(dspi_bus, (cyg_uint8*) tx_data,
                                              count_down, bus_16bit,
                                              pushr, drop_cs);
#if DEBUG_SPI >= 3
            hal_freescale_edma_transfer_diag(edma_p, dma_chan_rx_i, true);
#endif
            // Enable the Tx DMA / SPI controller.
            hal_freescale_edma_erq_enable(edma_p, dma_chan_tx_i);
            DSPI_EOQ_CLEAR(dspi_p);
        } else {
            // Transfer size fits within DSPI FIFO
            // No need for DMA Tx
            DSPI_EOQ_CLEAR(dspi_p);
            count_down = fifo_pushque_fill(dspi_bus, (cyg_uint8*) tx_data,
                                           count_down, bus_16bit,
                                           pushr, drop_cs);
#if DEBUG_SPI >= 3
            cyghwr_devs_freescale_dspi_diag(dspi_bus);
#endif
        }

        if(polled) {
            DEBUG2_PRINTF("DSPI Polled:\n");
            // Busy-wait for DSPI/DMA (polling for completion).
            while(!(dspi_p->sr & FREESCALE_DSPI_SR_EOQF_M));

            if(dma_set_p) {
                // Disable the Tx DMA channel on completion.
                hal_freescale_edma_erq_disable(edma_p, dma_chan_tx_i);
            }
        } else {
            // Wait for DSPI/DMA completion. (interrupt driven).
            cyg_drv_mutex_lock(&dspi_bus->transfer_mutex);
            cyg_drv_dsr_lock();

            DSPI_IRQ_ENABLE(dspi_p);
            DEBUG2_PRINTF("DSPI IRQ: Enabled\n");

            // Sit back and wait for the ISR/DSRs to signal completion.
            cyg_drv_cond_wait (&dspi_bus->transfer_done);

            cyg_drv_dsr_unlock();
            cyg_drv_mutex_unlock(&dspi_bus->transfer_mutex);
        }

        if(dma_set_p) {
            // Make sure that Rx has been drained by DMA.
            while((dspi_p->sr & FREESCALE_DSPI_SR_RFDF_M));
            DEBUG2_PRINTF("Fifo Drained by DMA 0x%08x\n", dspi_p->sr);
            if(count_down <= txfifo_n && count_down > 0) {
                hal_freescale_edma_erq_disable(edma_p, dma_chan_rx_i);
                dma_set_p = NULL;
            }
        } else {
            // No DMA - "manually" drain Rx FIFO
            DEBUG2_PRINTF("DSPI FIFO: 'Manually' drain Rx fifo rx_data=%p bus_16bit=%d\n",
                          rx_data, bus_16bit);
#if DEBUG_SPI >= 3
            cyghwr_devs_freescale_dspi_diag(dspi_bus);
#endif
            if(rx_data) {
                if(bus_16bit) {
                    cyg_uint16* rx_data16 = (cyg_uint16*) rx_data;
                    while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M) {
                        DEBUG2_PRINTF("  Fifo Pull16 at %p\n", rx_data16);
                        *rx_data16++ = dspi_p->popr;
                    }
                    rx_data = (cyg_uint8*) rx_data16;
                } else {
                    while(dspi_p->sr & FREESCALE_DSPI_SR_RXCTR_M) {
                        DEBUG2_PRINTF("  Fifo Pull at %p\n", rx_data);
                        *rx_data++ = dspi_p->popr;
                    }
                }
            }
            dspi_fifo_drain(dspi_p);
        }
        dspi_fifo_clear(dspi_p);
        // Prepare for next iteration
        if(tx_data) {
            tx_data += pushque_n;
            if(bus_16bit)
                tx_data += pushque_n;
        }
    }
    if(rx_data0) {
        // Rx buffer may be out of sync with cache.
        DEBUG2_PRINTF("DSPI DMA: Flush cache %p len=%d\n", rx_data0, count);
        HAL_DCACHE_INVALIDATE(rx_data0, count);
        DEBUG2_PRINTF("DSPI DMA: Cache flushed\n");
    }

    if(!polled)
        cyg_drv_interrupt_mask(dspi_bus->setup_p->intr_num);

    dspi_device->chip_sel = !drop_cs;
    DEBUG2_PRINTF("cyg_transaction_do() chip_sel = %d drop_cs = %d\n", dspi_device->chip_sel, drop_cs);
}