/*
 * mv_eth_hwf_pool_add - allocate bufNum cache-coherent buffers and hand
 * them to the given HW buffer-manager pool.
 *
 * Returns the number of buffers actually added (0 on parameter error, or
 * fewer than bufNum if allocation fails part-way; pBmPool->bufNum is
 * advanced by the same count either way).
 */
static int mv_eth_hwf_pool_add(MV_BM_POOL *pBmPool, int bufNum)
{
	int      i;
	void     *pVirt;
	MV_ULONG physAddr;

	/* Reject negative counts and requests that would exceed pool capacity */
	if ((bufNum < 0) ||
		((bufNum + pBmPool->bufNum) > (pBmPool->capacity))) {

		mvOsPrintf("%s: too many %d buffers for BM pool #%d: capacity=%d, buf_num=%d\n",
			   __func__, bufNum, pBmPool->pool, pBmPool->capacity, pBmPool->bufNum);
		return 0;
	}
	/* Allocate buffers for the pool; stop early on the first OOM */
	for (i = 0; i < bufNum; i++) {

		pVirt = mvOsIoCachedMalloc(NULL, pBmPool->bufSize, &physAddr, NULL);
		if (pVirt == NULL) {
			mvOsPrintf("%s: Warning! Not all buffers of %d bytes allocated\n",
						__func__, pBmPool->bufSize);
			break;
		}
		/* Hand the physical address to the BM hardware pool */
		mvBmPoolPut(pBmPool->pool, (MV_ULONG) physAddr);
	}
	/* i buffers were successfully queued, whether or not we hit OOM */
	pBmPool->bufNum += i;

	mvOsPrintf("BM pool #%d for HWF: bufSize=%4d - %4d of %4d buffers added\n",
	       pBmPool->pool, pBmPool->bufSize, i, bufNum);

	return i;
}
/***********************************************************
 * mv_eth_bm_start --                                      *
 *   enable and fill BM pool                               *
 ***********************************************************/
static int mv_eth_bm_start(MV_VOID)
{
	unsigned char *buff, *buff_phys;
	int i;

	mvBmPoolControl(EGIGA_BM_POOL, MV_START);
	mvPp2BmPoolBufSizeSet(EGIGA_BM_POOL, RX_BUFFER_SIZE);

	/* fill BM pool with buffers */
	for (i = 0; i < EGIGA_BM_SIZE; i++) {
		buff = (unsigned char *)malloc(RX_BUFFER_SIZE);
		if (!buff)
			return -1;

		buff_phys = (unsigned char *)MV_ALIGN_UP((MV_ULONG)buff, BM_ALIGN);

		mvBmPoolPut(EGIGA_BM_POOL, (MV_ULONG)buff_phys, (MV_ULONG)buff);
	}

	return 0;
}
/*
 * mvEgigaRx - poll the default RX queue, push received frames to the
 * network stack, and recycle each buffer back to its BM pool.
 *
 * Returns 0 always (packet count is reported to HW via
 * mvPp2RxqDescNumUpdate, not to the caller).
 */
static int mvEgigaRx(struct eth_device *dev)
{
	egigaPriv *priv = dev->priv;
	MV_U8 *pkt;
	int packets_done = 0;
	int num_received_packets, pool_id;
	MV_U32 status;
	MV_PP2_PHYS_RXQ_CTRL *pRxq;
	/* NULL until the first descriptor is fetched — guards the final
	 * cache invalidate when the queue is empty (was used uninitialized).
	 */
	PP2_RX_DESC *pDesc = NULL;

	if (priv->devInit != MV_TRUE || priv->devEnable != MV_TRUE)
		return 0; /* port is not initialized or not enabled */

	pRxq = mvPp2RxqHndlGet(priv->port, EGIGA_DEF_RXQ);
	num_received_packets = mvPp2RxqBusyDescNumGet(priv->port, EGIGA_DEF_RXQ);
	/* All busy descriptors are returned to HW, including dropped ones */
	packets_done = num_received_packets;

	while (num_received_packets--) {
		pDesc = mvPp2RxqNextDescGet(pRxq);
		/* cache invalidate - descriptor */
		mvOsCacheLineInv(NULL, pDesc);
#if defined(MV_CPU_BE)
		mvNetaRxqDescSwap(pDesc);/* TODO: replace with PP2 swap helper */
#endif /* MV_CPU_BE */
		status = pDesc->status;

		/* drop packets with error or with buffer header (MC, SG) */
		if ((status & PP2_RX_BUF_HDR_MASK) || (status & PP2_RX_ES_MASK)) {
#if defined(MV_CPU_BE)
			mvNetaRxqDescSwap(pDesc);/* TODO: replace with PP2 swap helper */
#endif /* MV_CPU_BE */
			mvOsCacheLineFlushInv(NULL, pDesc);
			continue;
		}
		/* TODO: drop fragmented packets */

		/* cache invalidate - packet */
		mvOsCacheInvalidate(NULL, (void *)pDesc->bufPhysAddr, RX_BUFFER_SIZE);

		/* give packet to stack - skip on first 2 bytes + buffer header.
		 * NOTE(review): length passed is dataSize-2, not
		 * dataSize-2-BUFF_HDR_OFFS — presumably BUFF_HDR_OFFS is not
		 * counted in dataSize; confirm against descriptor format.
		 */
		pkt = ((MV_U8 *)pDesc->bufPhysAddr) + 2 + BUFF_HDR_OFFS;
		NetReceive(pkt, (int)pDesc->dataSize - 2);

		/* refill: return the buffer to the BM pool it came from */
		pool_id = (status & PP2_RX_BM_POOL_ALL_MASK) >> PP2_RX_BM_POOL_ID_OFFS;
		mvBmPoolPut(pool_id, (MV_ULONG) pDesc->bufPhysAddr, (MV_ULONG) pDesc->bufCookie);

		/* cache invalidate - packet */
#if defined(MV_CPU_BE)
		mvNetaRxqDescSwap(pDesc);/* TODO: replace with PP2 swap helper */
#endif /* MV_CPU_BE */
		mvOsCacheInvalidate(NULL, (void *)pDesc->bufPhysAddr, RX_BUFFER_SIZE);

	}
	/* cache invalidate - last descriptor (skip when queue was empty) */
	if (pDesc != NULL)
		mvOsCacheLineInv(NULL, pDesc);

	/* tell HW how many descriptors were consumed and refilled */
	mvPp2RxqDescNumUpdate(priv->port, EGIGA_DEF_RXQ, packets_done, packets_done);

	return 0;
}