static int spu_format_input (uint8_t *cmd_buf,
                             int *offset,
                             struct spu_pkt_frag *frag_list,
                             int nfrags,
                             SACTX *devsa)
{
    uint32_t flags = 0;
    int status = BCM_STATUS_OK;
    struct spu_pkt_frag *pkt_frags = frag_list;
    int startbd;

    SPU_TRACE(("spu_format_input: Descriptors available dmactx %p "
              "phys %lx \n", devsa->dmactx, VIRT_TO_PHY (devsa->dmactx)));

    SPU_DATA_DUMP("cmd buf", cmd_buf, *offset);

    /*
     * Assign the first buffer descriptor for the message header.
     */
    flags = DMA_SOP;
    startbd = pdev_ctrl->tx_tail;
    spu_assign_input_desc (cmd_buf, *offset, flags);

    flags = DMA_OWN;
    while(pkt_frags)
    {
        SPU_DATA_DUMP("**** Tx Data ****", pkt_frags->buf, pkt_frags->len);
        spu_assign_input_desc (pkt_frags->buf, pkt_frags->len, flags);
        pkt_frags = pkt_frags->next;
    }

    /*
     * Setup the descriptor with the status word.
     */
    flags = DMA_EOP | DMA_OWN;
    memset(devsa->dmaStatus, 0, sizeof (uint32_t));
    SPU_TRACE(("spu_format_input: Last frag data %p phys %lx\n",
               devsa->dmaStatus, VIRT_TO_PHY(devsa->dmaStatus)));
    spu_assign_input_desc (devsa->dmaStatus, sizeof (uint32_t), flags);

    /* pass SOP to HW now */
    pdev_ctrl->tx_bds[startbd].status |= DMA_OWN;

    return status;
} /* spu_format_input */
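/*
 * spu_assign_input_desc() is referenced above but not included in this
 * excerpt. The following is a minimal sketch of what such a helper could
 * look like, assuming the same tx_bds/tx_tail/tx_free_bds ring bookkeeping
 * used by ipsec_setup_tx_rx() below; the real driver's version may differ.
 */
static void spu_assign_input_desc (void *buf, int len, uint32_t flags)
{
    int tail = pdev_ctrl->tx_tail;

    pdev_ctrl->tx_bds[tail].address = (uint32) VIRT_TO_PHY (buf);
    pdev_ctrl->tx_bds[tail].length = len;

    /* Close the ring at the last BD so the engine wraps back to BD 0 */
    if (tail == (NR_XMIT_BDS - 1)) {
        pdev_ctrl->tx_bds[tail].status = flags | DMA_WRAP;
        pdev_ctrl->tx_tail = 0;
    } else {
        pdev_ctrl->tx_bds[tail].status = flags;
        pdev_ctrl->tx_tail = tail + 1;
    }
    pdev_ctrl->tx_free_bds--;
}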
static void setup_rxdma_channel(BcmEnet_devctrl *pDevCtrl)
{
    BcmEnet_RxDma *rxdma = pDevCtrl->rxdma[0];
    volatile DmaRegs *dmaCtrl = pDevCtrl->dmaCtrl;
    int phy_chan = 0;
    DmaStateRam *StateRam = (DmaStateRam *)&dmaCtrl->stram.s[phy_chan * 2];

    memset(StateRam, 0, sizeof(DmaStateRam));

    BCM_ENET_DEBUG("Setup rxdma channel %d, baseDesc 0x%x\n", 0,
        (unsigned int)VIRT_TO_PHY((uint32 *)rxdma->pktDmaRxInfo.rxBds));

    rxdma->pktDmaRxInfo.rxDma->cfg = 0;
    rxdma->pktDmaRxInfo.rxDma->maxBurst = DMA_MAX_BURST_LENGTH;
    rxdma->pktDmaRxInfo.rxDma->intMask = 0;
    rxdma->pktDmaRxInfo.rxDma->intStat = DMA_DONE | DMA_NO_DESC | DMA_BUFF_DONE;
    rxdma->pktDmaRxInfo.rxDma->intMask = DMA_DONE | DMA_NO_DESC | DMA_BUFF_DONE;

    dmaCtrl->stram.s[phy_chan * 2].baseDescPtr =
            (uint32)VIRT_TO_PHY((uint32 *)rxdma->pktDmaRxInfo.rxBds);
}
static void setup_txdma_channel(BcmEnet_devctrl *pDevCtrl)
{
    DmaStateRam *StateRam;
    BcmPktDma_EthTxDma *txdma;
    volatile DmaRegs *dmaCtrl = pDevCtrl->dmaCtrl;
    int phy_chan = 0;
    /* txdma[0] because there's only one TX channel */
    txdma = pDevCtrl->txdma[0];

    StateRam = (DmaStateRam *)&dmaCtrl->stram.s[(phy_chan * 2) + 1];
    memset(StateRam, 0, sizeof(DmaStateRam));

    BCM_ENET_DEBUG("setup_txdma_channel: baseDesc 0x%x\n",
        (unsigned int)VIRT_TO_PHY((uint32 *)txdma->txBds));

    txdma->txDma->cfg = 0;
    txdma->txDma->maxBurst = DMA_MAX_BURST_LENGTH;
    txdma->txDma->intMask = 0;

    dmaCtrl->stram.s[(phy_chan * 2) + 1].baseDescPtr =
        (uint32)VIRT_TO_PHY((uint32 *)txdma->txBds);
}
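/*
 * A hedged usage sketch: once both channels are programmed, the driver
 * typically enables them by setting the DMA_ENABLE bit in each channel's
 * cfg register. The wrapper name below is illustrative only; the actual
 * driver may sequence the enable differently.
 */
static void setup_and_enable_dma(BcmEnet_devctrl *pDevCtrl)
{
    setup_rxdma_channel(pDevCtrl);
    setup_txdma_channel(pDevCtrl);

    /* Kick both engines; they start fetching BDs from baseDescPtr */
    pDevCtrl->rxdma[0]->pktDmaRxInfo.rxDma->cfg = DMA_ENABLE;
    pDevCtrl->txdma[0]->txDma->cfg = DMA_ENABLE;
}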
/***************************************************************************
 * Function Name: ipsec_setup_tx_rx
 * Description  : Setup Tx and Rx buffers for test.
 * Returns      : rx_index
 ***************************************************************************/
static int ipsec_setup_tx_rx (uint32 test_pkt_id, int *done)
{
    unsigned char *p;
    unsigned char *p8;
    unsigned char *palign;
    unsigned char *ptx;
    unsigned char *ptx_tmpl;
    unsigned char *tx_pkt;
    uint16 rx_index;
    uint16 tx_pkt_size = tx_pkt_len[test_pkt_id];
    uint16 rx_pkt_size = rx_pkt_len[test_pkt_id];
    unsigned long irq_flags;

    /*
     * Setup the Rx Buffer first
     */
    if ((p = kmalloc ((rx_pkt_size + BUF_ALIGN), GFP_KERNEL)) == NULL)
    {
	printk (KERN_ERR "IPSEC SPU: Error no memory for Rx buffer\n");
	*done = 0;
	return -ENOMEM;
    }

    rx_data = p;
    rx_index = pdev_ctrl->rx_tail;
    memset (p, 0, rx_pkt_size);
    cache_flush_len(p, rx_pkt_size + BUF_ALIGN);
    p8 = (unsigned char *) IPSEC_SPU_ALIGN (p, BUF_ALIGN);

    spin_lock_irqsave (&pdev_ctrl->spin_lock, irq_flags);
    pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].address = (uint32) VIRT_TO_PHY (p8);
    pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].length = rx_pkt_size;
    recv_pkt_len = rx_pkt_size;

    if (pdev_ctrl->rx_tail == (NR_RX_BDS - 1))
    {
	pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].status = DMA_OWN | DMA_WRAP;
#ifdef SPU_DEBUG
	printk (KERN_ERR "IPSEC SPU: Rx BD %p addr %lx len %x sts %x\n",
	      &pdev_ctrl->rx_bds[pdev_ctrl->rx_tail],
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].address,
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].length,
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].status);
#endif
	pdev_ctrl->rx_tail = 0;
    }
    else
    {
	pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].status = DMA_OWN;
#ifdef SPU_DEBUG
	printk (KERN_ERR "IPSEC SPU: ** Rx BD %p addr %lx len %x sts %x\n",
	      &pdev_ctrl->rx_bds[pdev_ctrl->rx_tail],
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].address,
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].length,
	      pdev_ctrl->rx_bds[pdev_ctrl->rx_tail].status);
#endif
	pdev_ctrl->rx_tail++;
    }
    spin_unlock_irqrestore(&pdev_ctrl->spin_lock, irq_flags);

    /*
     * Now setup the Tx buffer.
     */
    if ((tx_pkt = kmalloc (tx_pkt_size + BUF_ALIGN, GFP_KERNEL)) == NULL)
    {
	printk (KERN_ERR "IPSEC SPU: Error no memory for Tx buffer\n");
	kfree (rx_data);	/* don't leak the Rx buffer allocated above */
	rx_data = NULL;
	*done = 0;
	return -ENOMEM;
    }

    tx_data = tx_pkt;
    p8 = (unsigned char *) IPSEC_SPU_ALIGN (tx_pkt, BUF_ALIGN);
    p = (uint8 *) CACHE_TO_NONCACHE ((uint32) p8);
    memset (p, 0, tx_pkt_size);
    ptx_tmpl = (unsigned char *) tx_test_pkts[test_pkt_id];
    ptx = (unsigned char *) p;

#ifdef SPU_DEBUG
    printk (KERN_ERR "IPSEC SPU: tx_data %p p %p\n", ptx, p);
#endif

    memcpy (ptx, ptx_tmpl, tx_pkt_size);

    palign = (unsigned char *) IPSEC_SPU_ALIGN (tx_pkt, BUF_ALIGN);

#ifdef SPU_DEBUG
    printk (KERN_ERR
	"IPSEC SPU: Setting Up Tx BD tx_pkt %p p %p phy addr 0x%lx\n",
	  tx_pkt, p, (uint32) VIRT_TO_PHY (palign));
#endif

    spin_lock_irqsave(&pdev_ctrl->spin_lock, irq_flags);
    pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].address =
                                        (uint32) VIRT_TO_PHY (palign);
    pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].length = tx_pkt_size;
    xmit_pkt_len = tx_pkt_size;
    if (pdev_ctrl->tx_tail == (NR_XMIT_BDS - 1))
    {
	pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].status =
                                 DMA_OWN | DMA_SOP | DMA_EOP | DMA_WRAP;
#ifdef SPU_DEBUG
	printk (KERN_ERR "IPSEC SPU: Tx BD %p addr %lx len %x sts %x\n",
	      &pdev_ctrl->tx_bds[pdev_ctrl->tx_tail],
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].address,
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].length,
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].status);
#endif
	pdev_ctrl->tx_tail = 0;
	pdev_ctrl->tx_free_bds = NR_XMIT_BDS;
    }
    else
    {
	pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].status =
                                         DMA_OWN | DMA_SOP | DMA_EOP;
#ifdef SPU_DEBUG
	printk (KERN_ERR "IPSEC SPU: ** Tx BD %p addr %lx len %x sts %x\n",
	      &pdev_ctrl->tx_bds[pdev_ctrl->tx_tail],
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].address,
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].length,
	      pdev_ctrl->tx_bds[pdev_ctrl->tx_tail].status);
#endif
	pdev_ctrl->tx_free_bds--;
	pdev_ctrl->tx_tail++;
    }
    spin_unlock_irqrestore(&pdev_ctrl->spin_lock, irq_flags);
    return rx_index;
}
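/*
 * IPSEC_SPU_ALIGN() is used above but not defined in this excerpt. A
 * minimal sketch, assuming the usual round-up idiom where the boundary
 * (e.g. BUF_ALIGN) is a power of two:
 */
#define IPSEC_SPU_ALIGN(addr, boundary) \
    ((((unsigned long) (addr)) + ((boundary) - 1)) & \
     ~((unsigned long) ((boundary) - 1)))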
/*
 *  init_buffers: initialize driver's pools of receive buffers
 */
static int init_buffers(BcmEnet_devctrl *pDevCtrl)
{
#if !defined(_CONFIG_BCM_BPM)
    const unsigned long BlockSize = (64 * 1024);
    const unsigned long BufsPerBlock = BlockSize / BCM_PKTBUF_SIZE;
    unsigned long AllocAmt;
    unsigned char *pFkBuf;
    int j=0;
#endif
    int i;
    unsigned char *pSkbuff;
    unsigned long BufsToAlloc;
#if !defined(_CONFIG_BCM_BPM) || defined(RXCHANNEL_PKT_RATE_LIMIT)
    /* used by both the non-BPM allocation loop and the StdBy BD ring */
    unsigned char *data;
#endif
    BcmEnet_RxDma *rxdma;
    uint32 context = 0;

    RECYCLE_CONTEXT(context)->channel = 0;

    TRACE(("bcm63xxenet: init_buffers\n"));

    /* allocate receive buffer pool */
    rxdma = pDevCtrl->rxdma[0];
    /* These fields are also zeroed in the bcmPktDma channel init */
    rxdma->pktDmaRxInfo.rxAssignedBds = 0;
    rxdma->pktDmaRxInfo.rxHeadIndex = rxdma->pktDmaRxInfo.rxTailIndex = 0;
    BufsToAlloc = rxdma->pktDmaRxInfo.numRxBds;

#if defined(_CONFIG_BCM_BPM)
    if (enet_bpm_alloc_buf_ring(pDevCtrl, BufsToAlloc) == GBPM_ERROR)
    {
        printk(KERN_NOTICE "Eth: Low memory.\n");

        /* release all allocated receive buffers */
        enet_bpm_free_buf_ring(pDevCtrl);
        return -ENOMEM;
    }
#else
    /* one pointer slot per block; BufsToAlloc upper-bounds the block count */
    if( (rxdma->buf_pool = kzalloc(BufsToAlloc * sizeof(*rxdma->buf_pool) + 0x10,
        GFP_ATOMIC)) == NULL )
    {
        printk(KERN_NOTICE "Eth: Low memory.\n");
        return -ENOMEM;
    }

    while(BufsToAlloc) {
        AllocAmt = (BufsPerBlock < BufsToAlloc) ? BufsPerBlock : BufsToAlloc;
        if( (data = kmalloc(AllocAmt * BCM_PKTBUF_SIZE + 0x10, GFP_ATOMIC)) == NULL )
        {
            /* release all allocated receive buffers */
            printk(KERN_NOTICE CARDNAME": Low memory.\n");
            for (i = 0; i < j; i++) {
                if (rxdma->buf_pool[i]) {
                    kfree(rxdma->buf_pool[i]);
                    rxdma->buf_pool[i] = NULL;
                }
            }
            /* also release the pointer array itself */
            kfree(rxdma->buf_pool);
            rxdma->buf_pool = NULL;
            return -ENOMEM;
        }

        rxdma->buf_pool[j++] = data;
        /* Align data buffers on 16-byte boundary - Apr 2010 */
        data = (unsigned char *) (((UINT32) data + 0x0f) & ~0x0f);
        for (i = 0, pFkBuf = data; i < AllocAmt; i++, pFkBuf += BCM_PKTBUF_SIZE) {
            /* Place a FkBuff_t object at the head of pFkBuf */
            fkb_preinit(pFkBuf, (RecycleFuncP)bcm63xx_enet_recycle, context);
            flush_assign_rx_buffer(pDevCtrl, 0, /* headroom not flushed */
                        PFKBUFF_TO_PDATA(pFkBuf,BCM_PKT_HEADROOM),
                        (uint8_t*)pFkBuf + BCM_PKTBUF_SIZE);
        }
        BufsToAlloc -= AllocAmt;
    }
#endif

    if (!rxdma->skbs_p)
    { /* CAUTION!!! DO NOT reallocate the SKB pool */
        /*
         * The dynamic skb allocation logic assumes that all skb buffers on
         * 'freeSkbList' belong to one contiguous address range. If you change
         * the allocation method below, rework that logic as well; see the
         * kmem_cache_create, kmem_cache_alloc and kmem_cache_free calls in
         * this file.
         */
        if( (rxdma->skbs_p = kmalloc(
                        (rxdma->pktDmaRxInfo.numRxBds * BCM_SKB_ALIGNED_SIZE) + 0x10,
                        GFP_ATOMIC)) == NULL )
            return -ENOMEM;

        memset(rxdma->skbs_p, 0,
                    (rxdma->pktDmaRxInfo.numRxBds * BCM_SKB_ALIGNED_SIZE) + 0x10);

        rxdma->freeSkbList = NULL;

        /* Chain socket skbs */
        for(i = 0, pSkbuff = (unsigned char *)
            (((unsigned long) rxdma->skbs_p + 0x0f) & ~0x0f);
                i < rxdma->pktDmaRxInfo.numRxBds; i++, pSkbuff += BCM_SKB_ALIGNED_SIZE)
        {
            ((struct sk_buff *) pSkbuff)->next_free = rxdma->freeSkbList;
            rxdma->freeSkbList = (struct sk_buff *) pSkbuff;
        }
    }
    rxdma->end_skbs_p = rxdma->skbs_p + (rxdma->pktDmaRxInfo.numRxBds * BCM_SKB_ALIGNED_SIZE) + 0x10;


#if defined(RXCHANNEL_PKT_RATE_LIMIT)
    /* Initialize the StdBy BD ring with a single receive buffer */
    if( (data = kmalloc(BCM_PKTBUF_SIZE, GFP_ATOMIC)) == NULL ) {
        printk(KERN_NOTICE CARDNAME": Low memory.\n");
        return -ENOMEM;
    }
    rxdma->StdByBuf = data;
    rxdma->rxBdsStdBy[0].address =
             (uint32)VIRT_TO_PHY(data + BCM_PKT_HEADROOM);
    rxdma->rxBdsStdBy[0].length  = BCM_MAX_PKT_LEN;
    rxdma->rxBdsStdBy[0].status = DMA_OWN | DMA_WRAP;
#endif /* defined(RXCHANNEL_PKT_RATE_LIMIT) */

    return 0;
}
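/*
 * A hedged sketch of how the 'freeSkbList' built in init_buffers() is
 * typically consumed: pop an skb on receive, push it back on recycle.
 * The helper names are illustrative, not the driver's actual API; only
 * the 'next_free' link field comes from the code above.
 */
static struct sk_buff *rxdma_free_skb_pop(BcmEnet_RxDma *rxdma)
{
    struct sk_buff *skb = rxdma->freeSkbList;

    if (skb)
        rxdma->freeSkbList = skb->next_free;   /* unlink the head */
    return skb;
}

static void rxdma_free_skb_push(BcmEnet_RxDma *rxdma, struct sk_buff *skb)
{
    skb->next_free = rxdma->freeSkbList;       /* link back at the head */
    rxdma->freeSkbList = skb;
}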