/* Note: this may be called from an atomic context */
static int bcm63xx_alloc_rxdma_bds(BcmEnet_devctrl *pDevCtrl)
{
   BcmEnet_RxDma *rxdma;
   rxdma = pDevCtrl->rxdma[0];

#if defined(RXCHANNEL_PKT_RATE_LIMIT)
   /* Allocate 1 extra BD for rxBdsStdBy */
   /* The channel parameter is not used inside the function for Duna */
   rxdma->pktDmaRxInfo.rxBdsBase = bcmPktDma_EthAllocRxBds(pDevCtrl->vport_id, rxdma->pktDmaRxInfo.numRxBds + 1);
#else
   rxdma->pktDmaRxInfo.rxBdsBase = bcmPktDma_EthAllocRxBds(pDevCtrl->vport_id, rxdma->pktDmaRxInfo.numRxBds);
#endif
   if ( rxdma->pktDmaRxInfo.rxBdsBase == NULL )
   {
      printk("Unable to allocate memory for Rx Descriptors \n");
      return -ENOMEM;
   }

   /* Align BDs to a 16-byte boundary - Apr 2010 */
   rxdma->pktDmaRxInfo.rxBds = (volatile DmaDesc *)(((int)rxdma->pktDmaRxInfo.rxBdsBase + 0xF) & ~0xF);
   rxdma->pktDmaRxInfo.rxBds = (volatile DmaDesc *)CACHE_TO_NONCACHE(rxdma->pktDmaRxInfo.rxBds);

   /* Local copy of these vars also initialized to zero in bcmPktDma channel init */
   rxdma->pktDmaRxInfo.rxAssignedBds = 0;
   rxdma->pktDmaRxInfo.rxHeadIndex = rxdma->pktDmaRxInfo.rxTailIndex = 0;

#if defined(RXCHANNEL_PKT_RATE_LIMIT)
   /* stand by bd ring with only one BD */
   rxdma->rxBdsStdBy = &rxdma->pktDmaRxInfo.rxBds[rxdma->pktDmaRxInfo.numRxBds];
#endif

   return 0;
}
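The "(base + 0xF) & ~0xF" expression above rounds the BD base up to the next 16-byte boundary; the DDR allocation path shown later in this listing (bcmPktDma_EthAllocRxBds) reserves 0x10 spare bytes for exactly this. A minimal standalone sketch of the same idiom (the helper name is illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Round an address up to the next 16-byte boundary (same math as above). */
static uintptr_t align16_up(uintptr_t addr)
{
    return (addr + 0xF) & ~(uintptr_t)0xF;
}

int main(void)
{
    uintptr_t raw = 0x80001234;

    /* 0x80001234 -> 0x80001240: at most 15 bytes of slack are consumed,
     * which is why the caller-aligned allocation leaves 0x10 extra bytes. */
    printf("0x%lx -> 0x%lx\n", (unsigned long)raw, (unsigned long)align16_up(raw));
    return 0;
}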
Example No. 2
static int dump_output (uint16 rx_index)
{
    int ret = 1;
#ifdef SPU_DEBUG
    int i = 0;
#endif
    unsigned char *rxdata = NULL;
    unsigned char *prx;
    uint16 dma_status = pdev_ctrl->rx_bds[rx_index].status;

    if (dma_status & DMA_OWN)
    {
	printk (KERN_ERR "IPSEC SPU: Nothing to process\n");
	return ret;
    }

    rxdata = (unsigned char *) pdev_ctrl->rx_bds[rx_index].address;
    //prx = (unsigned char *)IPSEC_SPU_ALIGN(rxdata, BUF_ALIGN);
    prx = (uint8 *) CACHE_TO_NONCACHE (rxdata);

#ifdef SPU_DEBUG
    printk ("BD len %d status %0x address %lx\n",
	  pdev_ctrl->rx_bds[rx_index].length,
	  pdev_ctrl->rx_bds[rx_index].status,
	  pdev_ctrl->rx_bds[rx_index].address);

    printk ("********** Received Data **********\n");

    for (i = 0; i < pdev_ctrl->rx_bds[rx_index].length; i++)
    {
	printk ("%02x ", *(prx + i));
	if (!((i + 1) % 4))
	{
	    printk ("\n");
	}
    }
#endif

    return 0;
}
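As an aside, mainline kernels expose print_hex_dump() in linux/printk.h, which could replace the manual byte loop above; a sketch under that assumption (same prx and rx_index as in dump_output above):

#ifdef SPU_DEBUG
    /* 16 bytes per row, one-byte groups, offset prefix on every row. */
    print_hex_dump(KERN_DEBUG, "IPSEC SPU rx: ", DUMP_PREFIX_OFFSET,
                   16, 1, prx, pdev_ctrl->rx_bds[rx_index].length, false);
#endif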
static int bcm63xx_alloc_txdma_bds(BcmEnet_devctrl *pDevCtrl)
{
    BcmPktDma_EthTxDma *txdma;
    int nr_tx_bds;

    txdma = pDevCtrl->txdma[0];
    nr_tx_bds = txdma->numTxBds;

    /* BDs allocated in bcmPktDma lib in PSM or in DDR */
    /* First parameter is not used inside the function for Duna */
    txdma->txBdsBase = bcmPktDma_EthAllocTxBds(pDevCtrl->vport_id, nr_tx_bds);
    if (txdma->txBdsBase == NULL)
    {
        printk("Unable to allocate memory for Tx Descriptors \n");
        return -ENOMEM;
    }

    BCM_ENET_DEBUG("bcm63xx_alloc_txdma_bds txdma->txBdsBase 0x%x",
        (unsigned int)txdma->txBdsBase);

    txdma->txBds = txdma->txBdsBase;
    txdma->txRecycle = (BcmPktDma_txRecycle_t *)((uint32)txdma->txBds + (nr_tx_bds * sizeof(DmaDesc)));

    /* Align BDs to a 16/32 byte boundary - Apr 2010 */
    txdma->txBds = (volatile void *)(((int)txdma->txBds + 0xF) & ~0xF);
    txdma->txBds = (volatile void *)CACHE_TO_NONCACHE(txdma->txBds);
    txdma->txRecycle = (BcmPktDma_txRecycle_t *)((uint32)txdma->txBds + (nr_tx_bds * sizeof(DmaDesc)));
    txdma->txRecycle = (BcmPktDma_txRecycle_t *)NONCACHE_TO_CACHE(txdma->txRecycle);

    txdma->txFreeBds = nr_tx_bds;
    txdma->txHeadIndex = txdma->txTailIndex = 0;
    nr_tx_bds = txdma->numTxBds;

    /* BDs allocated in bcmPktDma lib in PSM or in DDR */
    memset((char *) txdma->txBds, 0, sizeof(DmaDesc) * nr_tx_bds );

    return 0;
}
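The pointer arithmetic above implies that the Tx area is one contiguous block: up to 15 bytes of alignment slack, then nr_tx_bds DmaDesc entries (accessed uncached), then nr_tx_bds txRecycle entries (accessed cached). A hedged sketch of the size such a block must cover, reusing the driver's own types (the helper itself is hypothetical, not part of bcmPktDma):

/* Illustrative size computation for the combined Tx BD area laid out above. */
static unsigned int eth_txbd_area_size(unsigned int nr_tx_bds)
{
    return 0xF                                          /* alignment slack */
         + nr_tx_bds * sizeof(DmaDesc)                  /* BD ring         */
         + nr_tx_bds * sizeof(BcmPktDma_txRecycle_t);   /* recycle entries */
}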
Example No. 4
/***************************************************************************
 * Function Name: ipsec_setup_tx_rx
 * Description  : Setup Tx and Rx buffers for test.
 * Returns      : rx_index
 ***************************************************************************/
static int ipsec_setup_tx_rx (uint32 test_pkt_id, int *done)
{
    int i = 0;
    unsigned char *p;
    unsigned char *p8;
    unsigned char *palign;
    unsigned char *ptx;
    unsigned char *ptx_tmpl;
    unsigned char *tx_pkt;
    uint16 rx_index;
    uint16 tx_pkt_size = tx_pkt_len[test_pkt_id];
    uint16 rx_pkt_size = rx_pkt_len[test_pkt_id];
    unsigned long irq_flags;

    /*
     * Setup the Rx Buffer first
     */
    if ((p = kmalloc ((rx_pkt_size + BUF_ALIGN), GFP_KERNEL)) == NULL)
    {
	printk (KERN_ERR "IPSEC SPU: Error no memory for Rx buffer\n");
	*done = 0;
	return -ENOMEM;
    }

    rx_data = p;
    rx_index = pdev_ctrl->rx_tail;
    memset (p, 0, rx_pkt_size);
    cache_flush_len(p, rx_pkt_size + BUF_ALIGN);
    p8 = (unsigned char *) IPSEC_SPU_ALIGN (p, BUF_ALIGN);

    spin_lock_irqsave (&pdev_ctrl->spin_lock, irq_flags);
    pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].address = (uint32) VIRT_TO_PHY (p8);
    pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].length = rx_pkt_size;
    recv_pkt_len = rx_pkt_size;

    if (pdev_ctrl->rx_tail == (NR_RX_BDS - 1))
    {
	pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].status = DMA_OWN | DMA_WRAP;
#ifdef SPU_DEBUG
	printk (KERN_ERR "IPSEC SPU: Rx BD %p addr %lx len %x sts %x\n",
	      &pdev_ctrl->rx_bds[pdev_ctrl->rx_tail],
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].address,
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].length,
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].status);
#endif
	pdev_ctrl->rx_tail = 0;
    }
    else
    {
	pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].status = DMA_OWN;
#ifdef SPU_DEBUG
	printk (KERN_ERR "IPSEC SPU: ** Rx BD %p addr %lx len %x sts %x\n",
	      &pdev_ctrl->rx_bds[pdev_ctrl->rx_tail],
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].address,
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].length,
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].status);
#endif
	pdev_ctrl->rx_tail++;
    }
    spin_unlock_irqrestore(&pdev_ctrl->spin_lock, irq_flags);

    /*
     * Now setup the Tx buffer.
     */
    if ((tx_pkt = kmalloc (tx_pkt_size + BUF_ALIGN, GFP_KERNEL)) == NULL)
    {
	printk (KERN_ERR "IPSEC SPU: Error INPUT PKT OUT OF MEMORY\n");
	*done = 0;
	return -ENOMEM;
    }

    tx_data = tx_pkt;
    p8 = (unsigned char *) IPSEC_SPU_ALIGN (tx_pkt, BUF_ALIGN);
    p = (uint8 *) CACHE_TO_NONCACHE ((uint32) p8);
    memset (p, 0, tx_pkt_size);
    ptx_tmpl = (unsigned char *) tx_test_pkts[test_pkt_id];
    ptx = (unsigned char *) p;

#ifdef SPU_DEBUG
    printk (KERN_ERR "IPSEC SPU: tx_data %p p %p\n", ptx, p);
#endif

    while (i < tx_pkt_size)
    {
	*ptx = *ptx_tmpl;
	ptx++;
	ptx_tmpl++;
	i++;
    }

    palign = (unsigned char *) IPSEC_SPU_ALIGN (tx_pkt, BUF_ALIGN);

#ifdef SPU_DEBUG
    printk (KERN_ERR
	"IPSEC SPU: Setting Up Tx BD tx_pkt %p p %p phy addr 0x%lx\n", ptx,
	  p, (uint32) VIRT_TO_PHY (palign));
#endif

    spin_lock_irqsave(&pdev_ctrl->spin_lock, irq_flags);
    pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].address =
                                        (uint32) VIRT_TO_PHY (palign);
    pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].length = tx_pkt_size;
    xmit_pkt_len = tx_pkt_size;
    if (pdev_ctrl->tx_tail == (NR_XMIT_BDS - 1))
    {
	pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].status =
                                 DMA_OWN | DMA_SOP | DMA_EOP | DMA_WRAP;
#ifdef SPU_DEBUG
	printk (KERN_ERR "IPSEC SPU: Tx BD %p addr %lx len %x sts %x\n",
	      &pdev_ctrl->tx_bds[pdev_ctrl->tx_tail],
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].address,
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].length,
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].status);
#endif
	pdev_ctrl->tx_tail = 0;
	pdev_ctrl->tx_free_bds = NR_XMIT_BDS;
    }
    else
    {
	pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].status =
                                         DMA_OWN | DMA_SOP | DMA_EOP;
#ifdef SPU_DEBUG
	printk (KERN_ERR "IPSEC SPU: ** Tx BD %p addr %lx len %x sts %x\n",
	      &pdev_ctrl->tx_bds[pdev_ctrl->tx_tail],
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].address,
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].length,
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].status);
#endif
	pdev_ctrl->tx_free_bds--;
	pdev_ctrl->tx_tail++;
    }
    spin_unlock_irqrestore(&pdev_ctrl->spin_lock, irq_flags);
    return rx_index;
}
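Both ring updates above advance the tail index by hand and mark only the last BD with DMA_WRAP, so the DMA engine wraps in hardware while software wraps the index. A minimal sketch of that index arithmetic (hypothetical helper, ring sizes as in NR_RX_BDS / NR_XMIT_BDS):

/* Advance a BD tail index around a ring of nr_bds descriptors; the BD at
 * nr_bds - 1 carries DMA_WRAP, so hardware loops back to BD 0 on its own. */
static inline unsigned int bd_ring_next(unsigned int tail, unsigned int nr_bds)
{
    return (tail == nr_bds - 1) ? 0 : tail + 1;
}

/* e.g. pdev_ctrl->rx_tail = bd_ring_next(pdev_ctrl->rx_tail, NR_RX_BDS); */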
Example No. 5
/***************************************************************************
 * Function Name: encrypt_decrypt_verify
 * Description  : Verify the result of encryption or decryption operation.
 * Returns      : 0 success
 ***************************************************************************/
static int encrypt_decrypt_verify (uint16 test_pkt_id, uint16 rx_index)
{
    int ret = 1;
    //unsigned char *p;
    unsigned char *ptx = NULL;
    unsigned char *prx = NULL;
    unsigned char *ptmpl = NULL;
    unsigned char *rxdata = NULL;
    //unsigned char *tx_data = tx_test_pkts[test_pkt_id];
    unsigned char *rx_tmpl = rx_templates[test_pkt_id];
    uint16 rx_len = pdev_ctrl->rx_bds[rx_index].length;
#ifdef SPU_DEBUG
    int i = 0;
    uint16 tx_len = tx_pkt_len[test_pkt_id];
#endif
    uint16 dma_status = pdev_ctrl->rx_bds[rx_index].status;

    if (dma_status & DMA_OWN)
    {
	printk (KERN_ERR "IPSEC SPU: Nothing to process\n");
	goto clean_up;
    }

    rxdata = (unsigned char *) pdev_ctrl->rx_bds[rx_index].address;
    prx = (unsigned char *) IPSEC_SPU_ALIGN (rxdata, BUF_ALIGN);

#ifdef SPU_DEBUG
    printk (KERN_ERR "IPSEC SPU: Rx Buffer pres addr %p rx addr %p\n",
	  prx, rxdata);
#endif

    prx = (uint8 *) CACHE_TO_NONCACHE (prx);

#ifdef SPU_DEBUG
    printk (KERN_ERR "IPSEC SPU: Rx Buffer pres addr %p rx addr %p\n",
	  prx, rxdata);
#endif

    ptmpl = (unsigned char *) IPSEC_SPU_ALIGN (rx_tmpl, BUF_ALIGN);

    ptx = (unsigned char *) IPSEC_SPU_ALIGN (tx_data, BUF_ALIGN);
    ptx = (uint8 *) CACHE_TO_NONCACHE (ptx);

    if (memcmp (prx, rx_tmpl, rx_len) == 0)
    {
        num_tests_passed++;
        printk (KERN_ERR "IPSEC SPU: Packet [%d] Test Passed Tx Len %d "
	      "Rx Len %d Time %lx\n", test_pkt_id, xmit_pkt_len,
	      recv_pkt_len, proc_time);
    }
    else
    {
        num_tests_failed++;
        printk (KERN_ERR "IPSEC SPU: Packet [%d] Test Failed Tx Len %d "
	      "Rx Len %d Time %lx\n", test_pkt_id, xmit_pkt_len,
	      recv_pkt_len, proc_time);

#ifdef SPU_DEBUG
	for (i = 0; i < tx_len; i += 4)
	{
	    printk ("Tx Pkt %p 0x%02x%02x%02x%02x\n",
		  (ptx + i),
		  *(ptx + i), *(ptx + i + 1), *(ptx + i + 2), *(ptx + i + 3));

	}

	for (i = 0; i < rx_len; i += 4)
	{
	    printk
	    ("Rx Pkt %p 0x%02x%02x%02x%02x \t Rx Exp %p 0x%02x%02x%02x%02x\n",
	     (prx + i), *(prx + i), *(prx + i + 1), *(prx + i + 2),
	     *(prx + i + 3), (ptmpl + i), *(ptmpl + i), *(ptmpl + i + 1),
	     *(ptmpl + i + 2), *(ptmpl + i + 3));
	}
#endif

    }

    ret = 0;

clean_up:

    /*
     * Clean up the buffers allocated for tx and rx
     * before leaving.
     */
    kfree (rx_data);
    kfree (tx_data);
    kfree (tx_hdr);

    return ret;
}
Example No. 6
/*
 * dma ring allocation is done here
 */
static int enet_dma_init(struct tangox_enet_priv *priv)
{
	unsigned int size;
	int i, rx_order, tx_order;
	
	/*
	 * allocate rx descriptor list & rx buffers
	 */
	size = RX_DESC_COUNT * sizeof (struct enet_desc);
	for (rx_order = 0; (PAGE_SIZE << rx_order) < size; rx_order++);

	if (!(priv->rx_descs_cached = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, rx_order)))
		return -ENOMEM;
	dma_cache_wback_inv((unsigned long)priv->rx_descs_cached, size);
	priv->rx_descs = (volatile struct enet_desc *)
		CACHE_TO_NONCACHE((unsigned long)priv->rx_descs_cached);

	/*
	 * initialize all rx descs
	 */
	for (i = 0; i < RX_DESC_COUNT; i++) {
		volatile struct enet_desc *rx;
		struct sk_buff *skb;

		rx = &priv->rx_descs[i];
		rx->config = RX_BUF_SIZE | DESC_BTS(2) | DESC_EOF/* | DESC_ID*/;

		skb = dev_alloc_skb(RX_BUF_SIZE + SKB_RESERVE_SIZE);
		if (!skb)
			return -ENOMEM;
		
		skb_reserve(skb, SKB_RESERVE_SIZE);
		*((volatile unsigned long *)KSEG1ADDR(&(priv->rx_report[i]))) = 0; 
		rx->s_addr = PHYSADDR((void *)skb->data);
		rx->r_addr = PHYSADDR((void *)&priv->rx_report[i]);
		rx->n_addr = PHYSADDR((void *)&priv->rx_descs[i+1]);
		if (i == (RX_DESC_COUNT - 1)) {
			rx->n_addr = PHYSADDR((void *)&priv->rx_descs[0]);
			rx->config |= DESC_EOC ;
			priv->rx_eoc = i;
		}
#ifdef ETH_DEBUG
		DBG("rx[%d]=0x%08x\n", i, (unsigned int)rx);
		DBG("  s_addr=0x%08x\n", (unsigned int)rx->s_addr);
		DBG("  n_addr=0x%08x\n", (unsigned int)rx->n_addr);
		DBG("  r_addr=0x%08x\n", (unsigned int)rx->r_addr);
		DBG("  config=0x%08x\n", (unsigned int)rx->config);
#endif
		dma_cache_inv((unsigned long)skb->data, RX_BUF_SIZE);
		priv->rx_skbs[i] = skb;
	}
	priv->last_rx_desc = 0;

	/*
	 * allocate tx descriptor list
	 *
	 * We allocate  only the descriptor list and  prepare them for
	 * further use. When tx is needed, we will set the right flags
	 * and kick the dma.
	 */
	size = TX_DESC_COUNT * sizeof (struct enet_desc);
	for (tx_order = 0; (PAGE_SIZE << tx_order) < size; tx_order++);

	if (!(priv->tx_descs_cached = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, tx_order))) {
		free_pages((u32)priv->rx_descs_cached, rx_order);
		return -ENOMEM;
	}
	dma_cache_wback_inv((unsigned long)priv->tx_descs_cached, size);
	priv->tx_descs = (volatile struct enet_desc *)
		CACHE_TO_NONCACHE((unsigned long)priv->tx_descs_cached);

	/*
	 * initialize tx descs
	 */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		volatile struct enet_desc *tx;

		priv->tx_bufs[i] = (unsigned char *)__get_free_page(GFP_KERNEL | GFP_DMA);
		dma_cache_wback_inv((unsigned long)priv->tx_bufs[i], PAGE_SIZE);

		tx = &priv->tx_descs[i];
		*((volatile unsigned long *)KSEG1ADDR(&(priv->tx_report[i]))) = 0; 
		tx->r_addr = PHYSADDR((void *)&priv->tx_report[i]);
		tx->s_addr = 0;
		tx->config = DESC_EOF;
		if (i == (TX_DESC_COUNT - 1)) {
			tx->config |= DESC_EOC;
			tx->n_addr = PHYSADDR((void *)&priv->tx_descs[0]);
			priv->tx_eoc = i;
		}
		//DBG("tx[%d]=0x%08x\n", i, (unsigned int)tx);
	}
	priv->dirty_tx_desc = priv->next_tx_desc = 0;
	priv->pending_tx = -1;
	priv->pending_tx_cnt  = 0;
	priv->reclaim_limit  = -1;
	priv->free_tx_desc_count = TX_DESC_COUNT;

	/*
	 * write rx desc list & tx desc list addresses in registers
	 */
	enet_writel(ENET_TX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)&priv->tx_descs[0]));
	enet_writel(ENET_RX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)&priv->rx_descs[0]));
	return 0;
}
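The empty-body for loops above search for the smallest page order whose allocation covers size; the kernel's get_order() helper computes the same value. A standalone sketch of the calculation (a 4 KiB PAGE_SIZE is assumed only for the printed example):

#include <stdio.h>

#define PAGE_SIZE 4096u   /* assumption for this demo only */

/* Smallest order such that (PAGE_SIZE << order) >= size, mirroring the
 * rx_order / tx_order loops in enet_dma_init(). */
static int page_order_for(unsigned int size)
{
    int order = 0;

    while ((PAGE_SIZE << order) < size)
        order++;
    return order;
}

int main(void)
{
    printf("order for 2048 bytes: %d\n", page_order_for(2048));          /* 0 */
    printf("order for 3 pages   : %d\n", page_order_for(3 * PAGE_SIZE)); /* 2 */
    return 0;
}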
Example No. 7
/* Return non-aligned, cache-based pointer to caller - Apr 2010 */
DmaDesc * bcmPktDma_EthAllocRxBds(int channel, int numBds)
{
#if defined(ENET_RX_BDS_IN_PSM) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))
#if defined(CONFIG_BCM_PKTDMA_RX_SPLITTING)
    if(g_Eth_rx_iudma_ownership[channel] == HOST_OWNED)
    {
        void * p;

        /* Rx Descriptors are allocated in DDR */
        if ((p = kmalloc(numBds*sizeof(DmaDesc), GFP_ATOMIC)) == NULL)
            return NULL;   /* report the failure instead of translating a NULL pointer */

        memset(p, 0, numBds*sizeof(DmaDesc));
        cache_flush_len(p, numBds*sizeof(DmaDesc));
        return( (DmaDesc *)(CACHE_TO_NONCACHE(p)) );   /* rx bd ring */
    }
    else
#endif
{
    uint8 * pMem;
    static uint8 * rxBdAllocation[ENET_RX_CHANNELS_MAX] = {NULL};
    static int rxNumBds[ENET_RX_CHANNELS_MAX] = {0};
    uint32 fapIdx;

    /* Restore previous BD allocation pointer if any */
    pMem = rxBdAllocation[channel];

    if (pMem)
    {
        if(rxNumBds[channel] != numBds)
        {
            printk("ERROR: Tried to allocate a different number of rxBDs (was %d, attempted %d)\n",
                    rxNumBds[channel], numBds);
            printk("       Eth rx BD allocation rejected!!\n");
            return( NULL );
        }
        memset(pMem, 0, numBds * sizeof(DmaDesc));
        return((DmaDesc *)pMem);   /* rx bd ring */
    }

    /* Try to allocate Rx Descriptors in PSM. Use Host-side addressing here. */
    /* fapDrv_psmAlloc guarantees byte alignment. */
    /* channel is iudma in this instance */
    fapIdx = getFapIdxFromEthRxIudma(channel);
    if ( fapIdx == FAP_INVALID_IDX )
    {
        printk("ERROR: bcmPktDma_psmAlloc for non-FAP channel (%d / %d)!!\n", channel, g_Eth_tx_iudma_ownership[channel]);
        return NULL;
    }

    pMem = bcmPktDma_psmAlloc(fapIdx, numBds * sizeof(DmaDesc));
    if(pMem != FAP4KE_OUT_OF_PSM)
    {
        memset(pMem, 0, numBds * sizeof(DmaDesc));
        rxBdAllocation[channel] = pMem;
        rxNumBds[channel] = numBds;
        return((DmaDesc *)pMem);   /* rx bd ring */
    }

    printk("ERROR: Out of PSM. Eth rx BD allocation rejected!!\n");
    return( NULL );
}
#else   /* !(defined(ENET_RX_BDS_IN_PSM) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE))) */
{
    void * p;

    /* Allocate Rx Descriptors in DDR */
    /* Leave room for alignment by caller - Apr 2010 */
    if ((p = kmalloc(numBds * sizeof(DmaDesc) + 0x10, GFP_KERNEL))) {
        memset(p, 0, numBds * sizeof(DmaDesc) + 0x10);
        cache_flush_len(p, numBds * sizeof(DmaDesc) + 0x10);
    }
    return((DmaDesc *)p);   /* rx bd ring */
}
#endif   /* defined(ENET_RX_BDS_IN_PSM) && (defined(CONFIG_BCM_FAP) || defined(CONFIG_BCM_FAP_MODULE)) */
}