/******************************************************************************
 * ae531x_RxQueueCreate - create a circular queue of Rx descriptors.
 *
 * Initializes the queue bookkeeping over the caller-supplied descriptor
 * memory, attaches a freshly-allocated receive buffer to every descriptor,
 * hands each descriptor to the DMA engine, and finally marks the last
 * descriptor end-of-ring so the hardware wraps back to the first one.
 *
 * MACInfo - per-MAC driver state (used for buffer allocation and debug unit)
 * q       - queue control structure to initialize
 * pMem    - backing memory for the descriptor array
 * count   - number of descriptors in the ring
 *
 * Returns 0 on success, -1 if a receive buffer could not be allocated
 * (the partially-built queue is torn down via ae531x_QueueDestroy).
 */
int ae531x_RxQueueCreate(ae531x_MAC_t *MACInfo, AE531X_QUEUE *q,
                         char *pMem, int count)
{
    VIRT_ADDR desc;
    int idx;

    ARRIVE();

    ae531x_QueueInit(q, pMem, count);
    q->reapDescAddr = NULL;

    /* Populate every descriptor with a buffer and give it to the DMA. */
    desc = q->firstDescAddr;
    for (idx = 0; idx < count; idx++) {
        char *rxBuffer;
        int rxBufferSize;
        void *sw;

        sw = ae531x_rxbuf_alloc(MACInfo, &rxBuffer, &rxBufferSize);
        if (sw == NULL) {
            AE531X_PRINT(AE531X_DEBUG_RESET,
                         ("eth%d RX queue: ae531x_rxbuf_alloc failed\n",
                          MACInfo->unit));
            ae531x_QueueDestroy(q);
            return -1;
        }

        AE531X_DESC_SWPTR_SET(desc, sw);
        AE531X_DESC_STATUS_SET(desc, DescOwnByDma);
        AE531X_DESC_CTRLEN_SET(desc, rxBufferSize);
        AE531X_DESC_BUFPTR_SET(desc, virt_to_bus(rxBuffer));
        AE531X_DESC_LNKBUF_SET(desc, (UINT32)0);

        /* Advance to the next descriptor slot. */
        desc = (VIRT_ADDR)((UINT32)desc + AE531X_QUEUE_ELE_SIZE);
    } /* for each desc */

    /* Make the queue circular: flag the final descriptor end-of-ring. */
    AE531X_DESC_CTRLEN_SET(q->lastDescAddr,
                           DescEndOfRing |
                           AE531X_DESC_CTRLEN_GET(q->lastDescAddr));

    AE531X_PRINT(AE531X_DEBUG_RESET,
                 ("eth%d Rxbuf begin = %x, end = %x\n",
                  MACInfo->unit,
                  (UINT32)q->firstDescAddr, (UINT32)q->lastDescAddr));

    LEAVE();
    return 0;
}
static int ae531x_poll(struct eth_device *dev) { ae531xRxBuf_t *rxBuf, *newRxBuf; char *rxBufData; int unused_length; VIRT_ADDR rxDesc; int length; ae531x_MAC_t *MACInfo; unsigned int cmdsts; ae531x_priv_data_t *ae531x_priv; ae531x_priv = (ae531x_priv_data_t *)dev->priv; MACInfo = (ae531x_MAC_t *)&ae531x_priv->MACInfo; do { for(;;) { rxDesc = MACInfo->rxQueue.curDescAddr; cmdsts = AE531X_DESC_STATUS_GET(KSEG1ADDR(rxDesc)); if (cmdsts & DescOwnByDma) { /* There's nothing left to process in the RX ring */ goto rx_all_done; } AE531X_PRINT(AE531X_DEBUG_RX, ("consume rxDesc %p with cmdsts=0x%x\n", (void *)rxDesc, cmdsts)); AE531X_CONSUME_DESC((&MACInfo->rxQueue)); A_DATA_CACHE_INVAL(rxDesc, AE531X_DESC_SIZE); /* Process a packet */ length = AE531X_DESC_STATUS_RX_SIZE(cmdsts) - ETH_CRC_LEN; if ( (cmdsts & (DescRxFirst |DescRxLast | DescRxErrors)) == (DescRxFirst | DescRxLast) ) { /* Descriptor status indicates "NO errors" */ rxBuf = AE531X_DESC_SWPTR_GET(rxDesc); /* * Allocate a replacement buffer * We want to get another buffer ready for Rx ASAP. */ newRxBuf = ae531x_rxbuf_alloc(MACInfo, &rxBufData, &unused_length); if(newRxBuf == NULL ) { /* * Give this descriptor back to the DMA engine, * and drop the received packet. */ AE531X_PRINT(AE531X_DEBUG_ERROR, ("Can't allocate new rx buf\n")); } else { //diag_printf("rxBufData=%p\n",rxBufData); AE531X_DESC_BUFPTR_SET(rxDesc, virt_to_bus(rxBufData)); AE531X_DESC_SWPTR_SET(rxDesc, newRxBuf); } AE531X_DESC_STATUS_SET(rxDesc, DescOwnByDma); rxDesc = NULL; /* sanity -- cannot use rxDesc now */ sysWbFlush(); if (newRxBuf == NULL) { goto no_rx_bufs; } else { /* Sync data cache w.r.t. 
DMA */ A_DATA_CACHE_INVAL(rxBuf->data, length); /* Send the data up the stack */ AE531X_PRINT(AE531X_DEBUG_RX, ("Send data up stack: rxBuf=%p data=%p length=%d\n", (void *)rxBuf, (void *)rxBuf->data, length)); { unsigned long oldIntrState; rxBuf->next = NULL; intDisable(oldIntrState); if (ae531xRxRecvdListHead) { ae531xRxRecvdListTail->next = rxBuf; ae531xRxRecvdListTail = rxBuf; } else { ae531xRxRecvdListHead = rxBuf; ae531xRxRecvdListTail = rxBuf; } intEnable(oldIntrState); } NetReceive(rxBuf->data, length); /* Free driver Rx buffer */ ae531x_rxbuf_free(rxBuf); } } else { AE531X_PRINT(AE531X_DEBUG_ERROR, ("Bad receive. rxDesc=%p cmdsts=0x%8.8x\n", (void *)rxDesc, cmdsts)); } } } while (ae531x_ReadDmaReg(MACInfo, DmaStatus) & DmaIntRxCompleted); rx_all_done: ae531x_SetDmaReg(MACInfo, DmaIntrEnb, DmaIeRxCompleted | DmaIeRxNoBuffer); ae531x_WriteDmaReg(MACInfo, DmaRxPollDemand, 0); no_rx_bufs: return; }