Example #1
static inline
struct eth_pbuf *eth_l2fw_copy_packet_withoutXor(struct eth_pbuf *pRxPktInfo)
{
    MV_U8 *pSrc;
    MV_U8 *pDst;
    struct bm_pool *pool;
    struct eth_pbuf *pTxPktInfo;

    /* The NIC just wrote this buffer via DMA: invalidate so the CPU reads fresh data */
    mvOsCacheInvalidate(NULL, pRxPktInfo->pBuf + pRxPktInfo->offset,
                        pRxPktInfo->bytes);

    /* Take a fresh buffer from the same BM pool for the Tx copy */
    pool = &mv_eth_pool[pRxPktInfo->pool];
    pTxPktInfo = mv_eth_pool_get(pool);
    if (pTxPktInfo == NULL) {
        mvOsPrintf("pTxPktInfo == NULL in %s\n", __func__);
        return NULL;
    }
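    /* Both pointers start just past the Marvell header (MH) prepended to the frame */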
    pSrc = pRxPktInfo->pBuf + pRxPktInfo->offset + MV_ETH_MH_SIZE;
    pDst = pTxPktInfo->pBuf + pTxPktInfo->offset + MV_ETH_MH_SIZE;

    /* Bytes 0-11 (destination and source MAC) are handled by
     * l2fw_copy_and_swap_mac(), which copies them swapped; copy the rest here */
    memcpy(pDst + 12, pSrc + 12, pRxPktInfo->bytes - 12);
    l2fw_copy_and_swap_mac(pRxPktInfo, pTxPktInfo);
    pTxPktInfo->bytes = pRxPktInfo->bytes;
    /* Flush the copy to memory so the NIC's DMA reads fresh data on transmit */
    mvOsCacheFlush(NULL, pTxPktInfo->pBuf + pTxPktInfo->offset, pTxPktInfo->bytes);

    return pTxPktInfo;
}
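Example #1 follows the standard cache-maintenance discipline for DMA buffers: invalidate before the CPU reads memory the device wrote, flush after the CPU writes memory the device will read. A minimal sketch of that pattern, with hypothetical cache_inv()/cache_flush() helpers standing in for mvOsCacheInvalidate()/mvOsCacheFlush():

#include <stddef.h>
#include <string.h>

/* Hypothetical stand-ins for mvOsCacheInvalidate()/mvOsCacheFlush(). */
void cache_inv(void *buf, size_t len);
void cache_flush(void *buf, size_t len);

/* Invalidate-before-read, flush-after-write: the same discipline the
 * example above applies around its memcpy(). */
void forward_packet(void *rx_buf, void *tx_buf, size_t len)
{
    cache_inv(rx_buf, len);      /* device wrote rx_buf via DMA: drop stale cache lines */
    memcpy(tx_buf, rx_buf, len); /* CPU copies through the cache */
    cache_flush(tx_buf, len);    /* write the copy back so the device's DMA reads fresh data */
}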
Example #2
/* Rx tasklet */
static void tdm_if_pcm_rx_process(unsigned long arg)
{
	TRC_REC("->%s\n",__FUNCTION__);
	if(pcm_enable) {
		if(rxBuff == NULL) {
			TRC_REC("%s: Error, empty Rx processing\n",__FUNCTION__);
			return;
		}

		/* Fill TDM Rx aggregated buffer */
#ifdef CONFIG_MV_TDM_SUPPORT
		if(mvTdmRx(rxBuff) == MV_OK)
			tdm_if_register_ops->tdm_if_pcm_ops.pcm_rx_callback(rxBuff, buff_size); /* Dispatch Rx handler */
#else
		if(mvCommUnitRx(rxBuff) == MV_OK) {
			tdm_if_register_ops->tdm_if_pcm_ops.pcm_rx_callback(rxBuff, buff_size); /* Dispatch Rx handler */
			/* The data buffer is shared between the MCDMA and the CPU,
			 * so invalidate it before the MCDMA accesses it again */
			mvOsCacheInvalidate(NULL, rxBuff, buff_size);
		}
#endif
		else /* pairs with the mvTdmRx()/mvCommUnitRx() check in either branch */
			printk("%s: could not fill Rx buffer\n",__FUNCTION__);

	}
	
	/* Clear rxBuff for next iteration */
	rxBuff = NULL;

	TRC_REC("<-%s\n",__FUNCTION__);
	return;
}
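For context, a tasklet with this (unsigned long) signature is normally declared and scheduled from the interrupt handler roughly as follows. This is a sketch against the classic (pre-5.9) Linux tasklet API; the ISR shown is an assumption, not code from this driver:

#include <linux/interrupt.h>

/* Sketch only: the ISR name and the zero data argument are assumptions. */
static DECLARE_TASKLET(tdm_if_rx_tasklet, tdm_if_pcm_rx_process, 0);

static irqreturn_t tdm_if_isr(int irq, void *dev_id)
{
	/* ... ack the interrupt and latch rxBuff here ... */
	tasklet_schedule(&tdm_if_rx_tasklet); /* defer Rx processing out of IRQ context */
	return IRQ_HANDLED;
}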
Example #3
/*=======================================================================*/
void dma_memzero(void *to, __kernel_size_t n)
{
	u32 phys_from, phys_to;
	u32 unaligned_to;
	unsigned long flags;

	DPRINTK("dma_memzero: entering\n");

	/* Used in the very early boot stages, before the IDMA is set up */
	if (!idma_init)
		return asm_memzero(to, n);

	/* Fall back when the buffer is not in a physically contiguous mapping */
	if (!virt_addr_valid(to)) {
		DPRINTK("Falling back to asm_memzero because of limitations\n");
		return asm_memzero(to, n);
	}

	++dma_memzero_cnt;	

	/*
	 * If the buffer start address is not cache-line-aligned,
	 * zero the unaligned head with the CPU.
	 */
	unaligned_to = (u32)to & 31;
	if (unaligned_to) {
		DPRINTK("Fixing up starting address %d bytes\n", 32 - unaligned_to);

		asm_memzero(to, 32 - unaligned_to);

		to = (void *)((u32)to + (32 - unaligned_to));

		/* Safe: n is assumed to be greater than 32 bytes at this point */
		n -= (32 - unaligned_to);
	}

	/*
	 * If the buffer end address is not cache-line-aligned,
	 * zero the unaligned tail with the CPU.
	 */
	unaligned_to = ((u32)to + n) & 31;
	if (unaligned_to) {
		u32 tmp_to = (u32)to + (n - unaligned_to);
		DPRINTK("Fixing ending alignment %d bytes\n", unaligned_to);

		asm_memzero((void *)tmp_to, unaligned_to);

		/* Safe: n is assumed to be greater than 32 bytes at this point */
		n -= unaligned_to;
	}

	phys_from = physical_address((u32)dmaMemInitBuff, 0);
	phys_to = physical_address((u32)to, 1);

	/*
	 * Prepare the IDMA.
	 */
	if ((!phys_from) || (!phys_to)) {
		/* The requested page isn't available; fall back to the CPU */
		DPRINTK(" no physical address, fall back: to %p \n", to);
		return asm_memzero(to, n);
	}

	spin_lock_irqsave(&current->mm->page_table_lock, flags);
	if (idma_busy) {
		/*
		 * The IDMA engine is busy. This can happen when
		 * dma_copy_to/from_user calls arch_copy_to/from_user,
		 * which may page-fault and in turn trigger a memcpy or memzero.
		 */
		DPRINTK(" idma is busy... \n");
		spin_unlock_irqrestore(&current->mm->page_table_lock, flags);
		return asm_memzero(to, n);
	}
	idma_busy = 1;

	/* Ensure that the destination region is invalidated */
	mvOsCacheInvalidate(NULL, (void *)to, n);
	
	/* Start DMA */
        DPRINTK(" activate DMA: channel %d from %x with source hold to %x len %x\n",CPY_CHAN1, phys_from, phys_to, n);
     	mvDmaMemInit(CPY_CHAN1, phys_from, phys_to, n);
	
#ifdef RT_DEBUG
	dma_activations++;
#endif
        
	if (wait_for_idma(CPY_CHAN1))
		BUG();

	DPRINTK("dma_memzero(0x%x, %lu): exiting\n", (u32)to, n);

	idma_busy = 0;
	spin_unlock_irqrestore(&current->mm->page_table_lock, flags);
}
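To make the head/tail fix-ups above concrete, here is a worked case with illustrative values (not from the source):

/*
 * Worked example of the 32-byte alignment trimming in dma_memzero():
 *
 *   to = 0x1003, n = 100
 *   head: unaligned_to = 0x1003 & 31 = 3
 *         asm_memzero zeroes 32 - 3 = 29 bytes  -> to = 0x1020, n = 71
 *   tail: unaligned_to = (0x1020 + 71) & 31 = 7
 *         asm_memzero zeroes 7 bytes at 0x1060  -> n = 64
 *
 *   The IDMA then handles the remaining 64 cache-line-aligned bytes.
 */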
static int mvEgigaRx(struct eth_device *dev)
{
	egigaPriv *priv = dev->priv;
	MV_U8 *pkt;
	int packets_done = 0;
	int num_received_packets, pool_id;
	MV_U32 status;
	MV_PP2_PHYS_RXQ_CTRL *pRxq;
	PP2_RX_DESC *pDesc;

	if (priv->devInit != MV_TRUE || priv->devEnable != MV_TRUE)
		return 0; /* port is not initialized or not enabled */

	pRxq = mvPp2RxqHndlGet(priv->port, EGIGA_DEF_RXQ);
	num_received_packets = mvPp2RxqBusyDescNumGet(priv->port, EGIGA_DEF_RXQ);
	packets_done = num_received_packets;

	/* Drain every descriptor the hardware has handed to the CPU */
	while (num_received_packets--) {
		pDesc = mvPp2RxqNextDescGet(pRxq);
		/* cache invalidate - descriptor */
		mvOsCacheLineInv(NULL, pDesc);
#if defined(MV_CPU_BE)
		mvNetaRxqDescSwap(pDesc);//TODO
#endif /* MV_CPU_BE */
		status = pDesc->status;

		/* drop packets with error or with buffer header (MC, SG) */
		if ((status & PP2_RX_BUF_HDR_MASK) || (status & PP2_RX_ES_MASK)) {
#if defined(MV_CPU_BE)
			mvNetaRxqDescSwap(pDesc);//TODO
#endif /* MV_CPU_BE */
			mvOsCacheLineFlushInv(NULL, pDesc);
			continue;
		}
		/* TODO: drop fragmented packets */

		/* cache invalidate - packet */
		mvOsCacheInvalidate(NULL, (void *)pDesc->bufPhysAddr, RX_BUFFER_SIZE);

		/* hand the packet to the stack, skipping the 2-byte Marvell header
		 * and the buffer header */
		pkt = ((MV_U8 *)pDesc->bufPhysAddr) + 2 + BUFF_HDR_OFFS;
		NetReceive(pkt, (int)pDesc->dataSize - 2);

		/* refill: pass packet back to BM */
		pool_id = (status & PP2_RX_BM_POOL_ALL_MASK) >> PP2_RX_BM_POOL_ID_OFFS;
		mvBmPoolPut(pool_id, (MV_ULONG) pDesc->bufPhysAddr, (MV_ULONG) pDesc->bufCookie);

		/* invalidate the packet buffer again before the hardware reuses it */
#if defined(MV_CPU_BE)
		mvNetaRxqDescSwap(pDesc);//TODO
#endif /* MV_CPU_BE */
		mvOsCacheInvalidate(NULL, (void *)pDesc->bufPhysAddr, RX_BUFFER_SIZE);

	}
	/* cache invalidate - last descriptor (pDesc is valid only if we looped) */
	if (packets_done)
		mvOsCacheLineInv(NULL, pDesc);

	mvPp2RxqDescNumUpdate(priv->port, EGIGA_DEF_RXQ, packets_done, packets_done);

	return 0;
}
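mvEgigaRx() is a legacy U-Boot recv hook; wiring it into the network stack would look roughly like this. eth_register() and the struct eth_device callbacks are real legacy U-Boot APIs, but the init function's name and its internals here are assumptions:

#include <common.h>
#include <net.h>
#include <malloc.h>

/* Sketch: the function name and priv setup are assumptions. */
int mvEgigaInitialize(bd_t *bis)
{
	struct eth_device *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return -1;

	sprintf(dev->name, "egiga0");
	dev->recv = mvEgigaRx;	/* polled from NetLoop() while a transfer is active */
	/* dev->init, dev->send, dev->halt and dev->priv would be set here too */

	return eth_register(dev);
}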