static void dump_xor(void)
{
	mvOsPrintf(" CHANNEL_ARBITER_REG     %08x\n",
		   MV_REG_READ(XOR_CHANNEL_ARBITER_REG(1)));
	mvOsPrintf(" CONFIG_REG              %08x\n",
		   MV_REG_READ(XOR_CONFIG_REG(1, XOR_CHAN(0))));
	mvOsPrintf(" ACTIVATION_REG          %08x\n",
		   MV_REG_READ(XOR_ACTIVATION_REG(1, XOR_CHAN(0))));
	mvOsPrintf(" CAUSE_REG               %08x\n",
		   MV_REG_READ(XOR_CAUSE_REG(1)));
	mvOsPrintf(" MASK_REG                %08x\n",
		   MV_REG_READ(XOR_MASK_REG(1)));
	mvOsPrintf(" ERROR_CAUSE_REG         %08x\n",
		   MV_REG_READ(XOR_ERROR_CAUSE_REG(1)));
	mvOsPrintf(" ERROR_ADDR_REG          %08x\n",
		   MV_REG_READ(XOR_ERROR_ADDR_REG(1)));
	mvOsPrintf(" NEXT_DESC_PTR_REG       %08x\n",
		   MV_REG_READ(XOR_NEXT_DESC_PTR_REG(1, XOR_CHAN(0))));
	mvOsPrintf(" CURR_DESC_PTR_REG       %08x\n",
		   MV_REG_READ(XOR_CURR_DESC_PTR_REG(1, XOR_CHAN(0))));
	mvOsPrintf(" BYTE_COUNT_REG          %08x\n\n",
		   MV_REG_READ(XOR_BYTE_COUNT_REG(1, XOR_CHAN(0))));
	/* Print the window control register offset itself, then its contents */
	mvOsPrintf(" XOR_WINDOW_CTRL_REG offset %08x\n\n",
		   XOR_WINDOW_CTRL_REG(1, XOR_CHAN(0)));
	mvOsPrintf(" XOR_WINDOW_CTRL_REG     %08x\n\n",
		   MV_REG_READ(XOR_WINDOW_CTRL_REG(1, XOR_CHAN(0))));
}
void print_xor_regs(int chan)
{
	printk(" XOR_CHANNEL_ARBITER_REG %08x\n",
	       MV_REG_READ(XOR_CHANNEL_ARBITER_REG));
	printk(" XOR_CONFIG_REG          %08x\n",
	       MV_REG_READ(XOR_CONFIG_REG(chan)));
	printk(" XOR_ACTIVATION_REG      %08x\n",
	       MV_REG_READ(XOR_ACTIVATION_REG(chan)));
	printk(" XOR_CAUSE_REG           %08x\n",
	       MV_REG_READ(XOR_CAUSE_REG));
	printk(" XOR_MASK_REG            %08x\n",
	       MV_REG_READ(XOR_MASK_REG));
	printk(" XOR_ERROR_CAUSE_REG     %08x\n",
	       MV_REG_READ(XOR_ERROR_CAUSE_REG));
	printk(" XOR_ERROR_ADDR_REG      %08x\n",
	       MV_REG_READ(XOR_ERROR_ADDR_REG));
	printk(" XOR_NEXT_DESC_PTR_REG   %08x\n",
	       MV_REG_READ(XOR_NEXT_DESC_PTR_REG(chan)));
	printk(" XOR_CURR_DESC_PTR_REG   %08x\n",
	       MV_REG_READ(XOR_CURR_DESC_PTR_REG(chan)));
	printk(" XOR_BYTE_COUNT_REG      %08x\n",
	       MV_REG_READ(XOR_BYTE_COUNT_REG(chan)));
}
void setXorDesc(void)
{
	unsigned int mode;

	/*
	 * Over-allocate so the descriptor can be aligned on a
	 * (XEXDPR_DST_PTR_DMA_MASK + 1) boundary, as the engine requires.
	 * Note that the original (unaligned) pointer is not kept; the
	 * descriptor lives for the lifetime of the driver.
	 */
	eth_xor_desc = mvOsMalloc(sizeof(MV_XOR_DESC) +
				  XEXDPR_DST_PTR_DMA_MASK + 32);
	eth_xor_desc = (MV_XOR_DESC *)MV_ALIGN_UP((MV_U32)eth_xor_desc,
						  XEXDPR_DST_PTR_DMA_MASK + 1);
	eth_xor_desc_phys_addr = mvOsIoVirtToPhys(NULL, eth_xor_desc);
	mvSysXorInit();

	/* Switch channel 0 of unit 1 to DMA (memcpy) operation mode */
	mode = MV_REG_READ(XOR_CONFIG_REG(1, XOR_CHAN(0)));
	mode &= ~XEXCR_OPERATION_MODE_MASK;
	mode |= XEXCR_OPERATION_MODE_DMA;
	MV_REG_WRITE(XOR_CONFIG_REG(1, XOR_CHAN(0)), mode);

	MV_REG_WRITE(XOR_NEXT_DESC_PTR_REG(1, XOR_CHAN(0)),
		     eth_xor_desc_phys_addr);
	dump_xor();
}
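/*
 * For reference: a minimal sketch of the descriptor fields the DMA path
 * below programs (srcAdd0, phyDestAdd, byteCnt, phyNextDescPtr, status).
 * This is an illustration only; the real MV_XOR_DESC layout in the Marvell
 * HAL (mvXor.h) is authoritative, and the field order and the extra fields
 * shown here are assumptions.
 */
#if 0	/* illustrative sketch, not the HAL definition */
typedef struct {
	MV_U32 status;		/* BIT31 = descriptor owned by the engine */
	MV_U32 crc32Result;	/* CRC-32 result (unused in DMA mode) */
	MV_U32 command;		/* per-descriptor command (unused in DMA mode) */
	MV_U32 phyNextDescPtr;	/* physical address of next descriptor, 0 = last */
	MV_U32 byteCnt;		/* transfer size in bytes */
	MV_U32 phyDestAdd;	/* physical destination address */
	MV_U32 srcAdd0;		/* physical source address #0 */
	/* ... additional source-address fields used in XOR mode ... */
} MV_XOR_DESC;
#endif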
static inline struct eth_pbuf *eth_l2fw_copy_packet_withXor(struct eth_pbuf *pRxPktInfo)
{
	struct bm_pool *pool;
	struct eth_pbuf *pTxPktInfo;

	pool = &mv_eth_pool[pRxPktInfo->pool];
	pTxPktInfo = mv_eth_pool_get(pool);
	if (pTxPktInfo == NULL) {
		mvOsPrintf("pTxPktInfo == NULL in %s\n", __func__);
		return NULL;
	}

	/*
	 * Sync between GbE and XOR to avoid errors (such as checksum errors
	 * in TX) when working with IOCC.
	 */
	mvOsCacheIoSync();

	/*
	 * The engine copies the frame starting 30 bytes past the Marvell
	 * header; the start of the frame is handled separately by the CPU
	 * copy (l2fw_copy_mac()) below.
	 */
	eth_xor_desc->srcAdd0 = pRxPktInfo->physAddr + pRxPktInfo->offset +
				MV_ETH_MH_SIZE + 30;
	eth_xor_desc->phyDestAdd = pTxPktInfo->physAddr + pTxPktInfo->offset +
				   MV_ETH_MH_SIZE + 30;
	eth_xor_desc->byteCnt = pRxPktInfo->bytes - 30;
	eth_xor_desc->phyNextDescPtr = 0;
	eth_xor_desc->status = BIT31;

	/*
	 * Only the first part of eth_xor_desc was changed, so flushing a
	 * single cache line is sufficient.
	 */
	mvOsCacheLineFlush(NULL, eth_xor_desc);

	MV_REG_WRITE(XOR_NEXT_DESC_PTR_REG(1, XOR_CHAN(0)),
		     eth_xor_desc_phys_addr);
	MV_REG_WRITE(XOR_ACTIVATION_REG(1, XOR_CHAN(0)), XEXACTR_XESTART_MASK);

	mvOsCacheLineInv(NULL, pRxPktInfo->pBuf + pRxPktInfo->offset);
	l2fw_copy_mac(pRxPktInfo, pTxPktInfo);
	mvOsCacheLineFlush(NULL, pTxPktInfo->pBuf + pTxPktInfo->offset);

	/* Update TxPktInfo */
	pTxPktInfo->bytes = pRxPktInfo->bytes;

	return pTxPktInfo;
}
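/*
 * Note that the XOR copy above is started asynchronously and is not waited
 * on in this function; the caller must ensure the channel is idle before
 * the cloned buffer is handed to the TX path. A minimal polling sketch,
 * assuming the XEXACTR_XESTATUS field definitions from mvXorRegs.h:
 */
static inline void l2fw_xor_wait_idle(void)
{
	/* Busy-wait until channel 0 of unit 1 reports the IDLE status */
	while ((MV_REG_READ(XOR_ACTIVATION_REG(1, XOR_CHAN(0))) &
		XEXACTR_XESTATUS_MASK) != XEXACTR_XESTATUS_IDLE)
		;
}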
/*
 * mv_xor_transfer - Transfer data from source to destination in one of
 * three modes (XOR, CRC32 or DMA)
 *
 * DESCRIPTION:
 *	This function activates an XOR channel, according to the function
 *	parameters, to perform an XOR, CRC32 or DMA transaction.
 *	To gain maximum performance the user is asked to keep the following
 *	restrictions:
 *	1) The selected engine is available (not busy).
 *	2) This module does not take CPU MMU issues into account. For the
 *	   XOR engine to access the appropriate source and destination,
 *	   address parameters must be given as system physical addresses.
 *	3) This API does not take care of cache coherency issues. The source,
 *	   destination, and (in case of a chain) the descriptor list are
 *	   assumed to be cache coherent.
 *	4) Parameter validity. For example, the size parameter must not
 *	   exceed the maximum byte count of the descriptor mode (16M or 64K).
 *
 * INPUT:
 *	chan          - XOR channel number. See the MV_XOR_CHANNEL enumerator.
 *	xor_type      - One of three: XOR, CRC32 or DMA operation.
 *	xor_chain_ptr - Address of the chain pointer.
 *
 * OUTPUT:
 *	None.
 *
 * RETURNS:
 *	MV_BAD_PARAM if a parameter is invalid, MV_BUSY if the channel is
 *	already active, MV_OK otherwise.
 */
int mv_xor_transfer(u32 chan, int xor_type, u32 xor_chain_ptr)
{
	u32 tmp;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		debug("%s: ERR. Invalid chan num %d\n", __func__, chan);
		return MV_BAD_PARAM;
	}
	if (MV_ACTIVE == mv_xor_state_get(chan)) {
		debug("%s: ERR. Channel is already active\n", __func__);
		return MV_BUSY;
	}
	if (0x0 == xor_chain_ptr) {
		debug("%s: ERR. xor_chain_ptr is NULL pointer\n", __func__);
		return MV_BAD_PARAM;
	}

	/* Read the configuration register and mask the operation mode field */
	tmp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	tmp &= ~XEXCR_OPERATION_MODE_MASK;

	switch (xor_type) {
	case MV_XOR:
		if (0 != (xor_chain_ptr & XEXDPR_DST_PTR_XOR_MASK)) {
			debug("%s: ERR. Invalid chain pointer (bits [5:0] must be cleared)\n",
			      __func__);
			return MV_BAD_PARAM;
		}
		/* Set the operation mode to XOR */
		tmp |= XEXCR_OPERATION_MODE_XOR;
		break;
	case MV_DMA:
		if (0 != (xor_chain_ptr & XEXDPR_DST_PTR_DMA_MASK)) {
			debug("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
			      __func__);
			return MV_BAD_PARAM;
		}
		/* Set the operation mode to DMA */
		tmp |= XEXCR_OPERATION_MODE_DMA;
		break;
	case MV_CRC32:
		if (0 != (xor_chain_ptr & XEXDPR_DST_PTR_CRC_MASK)) {
			debug("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
			      __func__);
			return MV_BAD_PARAM;
		}
		/* Set the operation mode to CRC32 */
		tmp |= XEXCR_OPERATION_MODE_CRC;
		break;
	default:
		return MV_BAD_PARAM;
	}

	/* Write the operation mode to the register */
	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), tmp);

	/*
	 * Update the NextDescPtr field in the XOR Engine [0..1] Next
	 * Descriptor Pointer Register (XExNDPR)
	 */
	reg_write(XOR_NEXT_DESC_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		  xor_chain_ptr);

	/* Start the transfer */
	reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		    XEXACTR_XESTART_MASK);

	return MV_OK;
}
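/*
 * A minimal usage sketch for the DMA (memcpy) mode: start_memcpy() is a
 * hypothetical caller that has already prepared a single descriptor at
 * physical address desc_phys (suitably aligned, see the
 * XEXDPR_DST_PTR_DMA_MASK check above) and then polls the channel state
 * until the transfer completes.
 */
static int start_memcpy(u32 chan, u32 desc_phys)
{
	int ret;

	ret = mv_xor_transfer(chan, MV_DMA, desc_phys);
	if (ret != MV_OK)
		return ret;

	/* Busy-wait until the channel leaves the ACTIVE state */
	while (mv_xor_state_get(chan) == MV_ACTIVE)
		;

	return MV_OK;
}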
/*******************************************************************************
* mvXorTransfer - Transfer data from source to destination in one of
*                 three modes (XOR, CRC32 or DMA)
*
* DESCRIPTION:
*       This function activates an XOR channel, according to the function
*       parameters, to perform an XOR, CRC32 or DMA transaction.
*       To gain maximum performance the user is asked to keep the following
*       restrictions:
*       1) The selected engine is available (not busy).
*       2) This module does not take CPU MMU issues into account. For the
*          XOR engine to access the appropriate source and destination,
*          address parameters must be given as system physical addresses.
*       3) This API does not take care of cache coherency issues. The source,
*          destination, and (in case of a chain) the descriptor list are
*          assumed to be cache coherent.
*       4) Parameter validity. For example, the size parameter must not
*          exceed the maximum byte count of the descriptor mode (16M or 64K).
*
* INPUT:
*       chan         - XOR channel number. See the MV_XOR_CHANNEL enumerator.
*       xorType      - One of three: XOR, CRC32 or DMA operation.
*       xorChainPtr  - Address of the chain pointer.
*
* OUTPUT:
*       None.
*
* RETURNS:
*       MV_BAD_PARAM if a parameter is invalid, MV_BUSY if the channel is
*       already active, MV_OK otherwise.
*
*******************************************************************************/
MV_STATUS mvXorTransfer(MV_U32 chan, MV_XOR_TYPE xorType, MV_U32 xorChainPtr)
{
	MV_U32 temp;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(mvOsPrintf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_BAD_PARAM;
	}
	if (MV_ACTIVE == mvXorStateGet(chan)) {
		DB(mvOsPrintf("%s: ERR. Channel is already active\n", __func__));
		return MV_BUSY;
	}
	if (0x0 == xorChainPtr) {
		DB(mvOsPrintf("%s: ERR. xorChainPtr is NULL pointer\n", __func__));
		return MV_BAD_PARAM;
	}

	/* Read the configuration register and mask the operation mode field */
	temp = MV_REG_READ(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	temp &= ~XEXCR_OPERATION_MODE_MASK;

	switch (xorType) {
	case MV_XOR:
		if (0 != (xorChainPtr & XEXDPR_DST_PTR_XOR_MASK)) {
			DB(mvOsPrintf("%s: ERR. Invalid chain pointer (bits [5:0] must "
				      "be cleared)\n", __func__));
			return MV_BAD_PARAM;
		}
		/* Set the operation mode to XOR */
		temp |= XEXCR_OPERATION_MODE_XOR;
		break;
	case MV_DMA:
		if (0 != (xorChainPtr & XEXDPR_DST_PTR_DMA_MASK)) {
			DB(mvOsPrintf("%s: ERR. Invalid chain pointer (bits [4:0] must "
				      "be cleared)\n", __func__));
			return MV_BAD_PARAM;
		}
		/* Set the operation mode to DMA */
		temp |= XEXCR_OPERATION_MODE_DMA;
		break;
	case MV_CRC32:
		if (0 != (xorChainPtr & XEXDPR_DST_PTR_CRC_MASK)) {
			DB(mvOsPrintf("%s: ERR. Invalid chain pointer (bits [4:0] must "
				      "be cleared)\n", __func__));
			return MV_BAD_PARAM;
		}
		/* Set the operation mode to CRC32 */
		temp |= XEXCR_OPERATION_MODE_CRC;
		break;
	default:
		return MV_BAD_PARAM;
	}

	/* Write the operation mode to the register */
	MV_REG_WRITE(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);

	/*
	 * Update the NextDescPtr field in the XOR Engine [0..1] Next
	 * Descriptor Pointer Register (XExNDPR)
	 */
	MV_REG_WRITE(XOR_NEXT_DESC_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		     xorChainPtr);

	/* Start the transfer */
	MV_REG_BIT_SET(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		       XEXACTR_XESTART_MASK);

	return MV_OK;
}