/**
 * Pull one frame from the card
 * @param[in] dev Our ethernet device to handle
 * @return Length of packet read
 */
static int fec_recv(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
	unsigned long ievent;
	int frame_length, len = 0;
	struct nbuf *frame;
	uint16_t bd_status;
	uchar buff[FEC_MAX_PKT_SIZE];

	/*
	 * Check if any critical events have happened
	 */
	ievent = readl(&fec->eth->ievent);
	writel(ievent, &fec->eth->ievent);
	debug("fec_recv: ievent 0x%lx\n", ievent);
	if (ievent & FEC_IEVENT_BABR) {
		fec_halt(dev);
		fec_init(dev, fec->bd);
		printf("some error: 0x%08lx\n", ievent);
		return 0;
	}
	if (ievent & FEC_IEVENT_HBERR) {
		/* Heartbeat error */
		writel(0x00000001 | readl(&fec->eth->x_cntrl),
				&fec->eth->x_cntrl);
	}
	if (ievent & FEC_IEVENT_GRA) {
		/* Graceful stop complete */
		if (readl(&fec->eth->x_cntrl) & 0x00000001) {
			fec_halt(dev);
			writel(~0x00000001 & readl(&fec->eth->x_cntrl),
					&fec->eth->x_cntrl);
			fec_init(dev, fec->bd);
		}
	}

	/*
	 * ensure reading the right buffer status
	 */
	bd_status = readw(&rbd->status);
	debug("fec_recv: status 0x%x\n", bd_status);

	if (!(bd_status & FEC_RBD_EMPTY)) {
		if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
			((readw(&rbd->data_length) - 4) > 14)) {
			/*
			 * Get buffer address and size
			 */
			frame = (struct nbuf *)readl(&rbd->data_pointer);
			frame_length = readw(&rbd->data_length) - 4;
			/*
			 * Fill the buffer and pass it to upper layers
			 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
			swap_packet((uint32_t *)frame->data, frame_length);
#endif
			memcpy(buff, frame->data, frame_length);
			NetReceive(buff, frame_length);
			len = frame_length;
		} else {
			if (bd_status & FEC_RBD_ERR)
				printf("error frame: 0x%08lx 0x%08x\n",
					(ulong)rbd->data_pointer, bd_status);
		}
		/*
		 * free the current buffer, restart the engine
		 * and move forward to the next buffer
		 */
		fec_rbd_clean(fec->rbd_index == (FEC_RBD_NUM - 1) ? 1 : 0, rbd);
		fec_rx_task_enable(fec);
		fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
	}
	debug("fec_recv: stop\n");

	return len;
}
/**
 * Pull one frame from the card
 * @param[in] dev Our ethernet device to handle
 * @return Length of packet read
 */
static int fec_recv(struct eth_device *dev)
{
	struct fec_priv *fec = (struct fec_priv *)dev->priv;
	struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
	unsigned long ievent;
	int frame_length, len = 0;
	uint16_t bd_status;
	uint32_t addr, size, end;
	int i;
	ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);

	/*
	 * Check if any critical events have happened
	 */
	ievent = readl(&fec->eth->ievent);
	writel(ievent, &fec->eth->ievent);
	debug("fec_recv: ievent 0x%lx\n", ievent);
	if (ievent & FEC_IEVENT_BABR) {
		fec_halt(dev);
		fec_init(dev, fec->bd);
		printf("some error: 0x%08lx\n", ievent);
		return 0;
	}
	if (ievent & FEC_IEVENT_HBERR) {
		/* Heartbeat error */
		writel(0x00000001 | readl(&fec->eth->x_cntrl),
				&fec->eth->x_cntrl);
	}
	if (ievent & FEC_IEVENT_GRA) {
		/* Graceful stop complete */
		if (readl(&fec->eth->x_cntrl) & 0x00000001) {
			fec_halt(dev);
			writel(~0x00000001 & readl(&fec->eth->x_cntrl),
					&fec->eth->x_cntrl);
			fec_init(dev, fec->bd);
		}
	}

	/*
	 * Read the buffer status. Before the status can be read, the data
	 * cache must be invalidated, because the data in RAM might have been
	 * changed by DMA. The descriptors are properly aligned to cachelines
	 * so there's no need to worry they'd overlap.
	 *
	 * WARNING: By invalidating the descriptor here, we also invalidate
	 * the descriptors surrounding this one. Therefore we can NOT change
	 * the contents of this descriptor nor the surrounding ones. The
	 * problem is that in order to mark the descriptor as processed, we
	 * need to change the descriptor. The solution is to mark the whole
	 * cache line when all descriptors in the cache line are processed.
	 */
	addr = (uint32_t)rbd;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	invalidate_dcache_range(addr, addr + size);

	bd_status = readw(&rbd->status);
	debug("fec_recv: status 0x%x\n", bd_status);

	if (!(bd_status & FEC_RBD_EMPTY)) {
		if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
			((readw(&rbd->data_length) - 4) > 14)) {
			/*
			 * Get buffer address and size
			 */
			addr = readl(&rbd->data_pointer);
			frame_length = readw(&rbd->data_length) - 4;
			/*
			 * Invalidate data cache over the buffer
			 */
			end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
			addr &= ~(ARCH_DMA_MINALIGN - 1);
			invalidate_dcache_range(addr, end);
			/*
			 * Fill the buffer and pass it to upper layers
			 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
			swap_packet((uint32_t *)addr, frame_length);
#endif
			memcpy(buff, (char *)addr, frame_length);
			net_process_received_packet(buff, frame_length);
			len = frame_length;
		} else {
			if (bd_status & FEC_RBD_ERR)
				printf("error frame: 0x%08x 0x%08x\n",
					addr, bd_status);
		}
		/*
		 * Free the current buffer, restart the engine and move
		 * forward to the next buffer. Here we check if the whole
		 * cacheline of descriptors was already processed and if so,
		 * we mark it free as whole.
		 */
		size = RXDESC_PER_CACHELINE - 1;
		if ((fec->rbd_index & size) == size) {
			i = fec->rbd_index - size;
			addr = (uint32_t)&fec->rbd_base[i];
			for (; i <= fec->rbd_index; i++) {
				fec_rbd_clean(i == (FEC_RBD_NUM - 1),
					&fec->rbd_base[i]);
			}
			flush_dcache_range(addr, addr + ARCH_DMA_MINALIGN);
		}

		fec_rx_task_enable(fec);
		fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
	}
	debug("fec_recv: stop\n");

	return len;
}
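The invalidation above leans on ARCH_DMA_MINALIGN being a power of two: an address is rounded down to its cacheline with a mask and a span is rounded up with roundup(). Below is a minimal, standalone sketch of the same arithmetic; the cacheline size, descriptor size and ring base address are assumed illustrative values, not taken from any real board header. It also shows why an RX descriptor may only be "cleaned" once the whole cacheline of descriptors it shares has been processed, which is what the RXDESC_PER_CACHELINE check in the function implements.

/*
 * Standalone sketch of the cacheline arithmetic used by fec_recv().
 * All numeric values here are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define ARCH_DMA_MINALIGN	32	/* assumed cacheline size */
#define DESC_SIZE		8	/* assumed sizeof(struct fec_bd) */
#define DESC_PER_CACHELINE	(ARCH_DMA_MINALIGN / DESC_SIZE)

/* Round down to the start of the cacheline containing addr. */
static uint32_t line_start(uint32_t addr)
{
	return addr & ~(ARCH_DMA_MINALIGN - 1);
}

/* Round a span length up to a whole number of cachelines. */
static uint32_t line_size(uint32_t len)
{
	return (len + ARCH_DMA_MINALIGN - 1) & ~(ARCH_DMA_MINALIGN - 1);
}

int main(void)
{
	uint32_t rbd_base = 0x87f00000;	/* hypothetical, cacheline-aligned ring base */
	int i;

	for (i = 0; i < 8; i++) {
		uint32_t rbd = rbd_base + i * DESC_SIZE;
		int last_in_line = (i & (DESC_PER_CACHELINE - 1)) ==
				   (DESC_PER_CACHELINE - 1);

		/* One invalidate always covers every descriptor in the line. */
		printf("rbd[%d] @ 0x%08x -> invalidate [0x%08x, 0x%08x)%s\n",
		       i, rbd, line_start(rbd),
		       line_start(rbd) + line_size(DESC_SIZE),
		       last_in_line ? "  <- whole line processed, safe to clean" : "");
	}
	return 0;
}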
/**
 * Transmit one frame
 * @param[in] dev Our ethernet device to handle
 * @param[in] packet Pointer to the data to be transmitted
 * @param[in] length Data count in bytes
 * @return 0 on success
 */
static int fec_send(struct eth_device *dev, volatile void *packet, int length)
{
	unsigned int status;

	/*
	 * This routine transmits one frame. This routine only accepts
	 * 6-byte Ethernet addresses.
	 */
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	/*
	 * Check for valid length of data.
	 */
	if ((length > 1500) || (length <= 0)) {
		printf("Payload (%d) too large\n", length);
		return -1;
	}

	/*
	 * Setup the transmit buffer
	 * Note: We are always using the first buffer for transmission,
	 * the second will be empty and only used to stop the DMA engine
	 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
	swap_packet((uint32_t *)packet, length);
#endif
	writew(length, &fec->tbd_base[fec->tbd_index].data_length);
	writel((uint32_t)packet, &fec->tbd_base[fec->tbd_index].data_pointer);

	/*
	 * Update BD's status now
	 * This block:
	 * - is always the last in a chain (means no chain)
	 * - should transmit the CRC
	 * - might be the last BD in the list, so the address counter should
	 *   wrap (-> keep the WRAP flag)
	 */
	status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
	status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
	writew(status, &fec->tbd_base[fec->tbd_index].status);

	/*
	 * Enable SmartDMA transmit task
	 */
	fec_tx_task_enable(fec);

	/*
	 * Wait until frame is sent.
	 */
	while (readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_READY) {
		udelay(1);
	}
	debug("fec_send: status 0x%x index %d\n",
			readw(&fec->tbd_base[fec->tbd_index].status),
			fec->tbd_index);

	/* for next transmission use the other buffer */
	if (fec->tbd_index)
		fec->tbd_index = 0;
	else
		fec->tbd_index = 1;

	return 0;
}
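The status update above is a read-modify-write: only WRAP survives from the old descriptor value, while LAST, TC (transmit CRC) and READY are set before the DMA engine is kicked, so the final descriptor keeps wrapping the ring even though the two descriptors are used alternately. The following small model illustrates that behaviour; the bit values are placeholders, not the real FEC_TBD_* definitions from the driver headers.

/*
 * Illustrative model of the two-descriptor TX ring update in fec_send().
 * Bit positions are assumed for the sketch only.
 */
#include <stdint.h>
#include <stdio.h>

#define TBD_READY	0x8000	/* descriptor owned by the DMA engine */
#define TBD_WRAP	0x2000	/* last descriptor: ring wraps here */
#define TBD_LAST	0x0800	/* last buffer of the frame */
#define TBD_TC		0x0400	/* append CRC on transmit */

static uint16_t tbd_status[2] = { 0x0000, TBD_WRAP };	/* two-entry TX ring */

static void tbd_arm(int index)
{
	/* Keep only WRAP, then mark the BD as one complete, ready frame. */
	uint16_t status = tbd_status[index] & TBD_WRAP;

	status |= TBD_LAST | TBD_TC | TBD_READY;
	tbd_status[index] = status;
}

int main(void)
{
	int index = 0, i;

	for (i = 0; i < 4; i++) {
		tbd_arm(index);
		printf("send %d: tbd[%d] status 0x%04x\n",
		       i, index, tbd_status[index]);
		/* pretend the DMA engine finished and cleared READY */
		tbd_status[index] &= ~TBD_READY;
		/* alternate between the two descriptors, like fec_send() */
		index = !index;
	}
	return 0;
}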
/**
 * Transmit one frame
 * @param[in] dev Our ethernet device to handle
 * @param[in] packet Pointer to the data to be transmitted
 * @param[in] length Data count in bytes
 * @return 0 on success
 */
static int fec_send(struct eth_device *dev, void *packet, int length)
{
	unsigned int status;
	uint32_t size, end;
	uint32_t addr;
	int timeout = FEC_XFER_TIMEOUT;
	int ret = 0;

	/*
	 * This routine transmits one frame. This routine only accepts
	 * 6-byte Ethernet addresses.
	 */
	struct fec_priv *fec = (struct fec_priv *)dev->priv;

	/*
	 * Check for valid length of data.
	 */
	if ((length > 1500) || (length <= 0)) {
		printf("Payload (%d) too large\n", length);
		return -1;
	}

	/*
	 * Setup the transmit buffer. We are always using the first buffer for
	 * transmission, the second will be empty and only used to stop the
	 * DMA engine. We also flush the packet to RAM here to avoid cache
	 * trouble.
	 */
#ifdef CONFIG_FEC_MXC_SWAP_PACKET
	swap_packet((uint32_t *)packet, length);
#endif

	addr = (uint32_t)packet;
	end = roundup(addr + length, ARCH_DMA_MINALIGN);
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	flush_dcache_range(addr, end);

	writew(length, &fec->tbd_base[fec->tbd_index].data_length);
	writel(addr, &fec->tbd_base[fec->tbd_index].data_pointer);

	/*
	 * Update BD's status now
	 * This block:
	 * - is always the last in a chain (means no chain)
	 * - should transmit the CRC
	 * - might be the last BD in the list, so the address counter should
	 *   wrap (-> keep the WRAP flag)
	 */
	status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
	status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
	writew(status, &fec->tbd_base[fec->tbd_index].status);

	/*
	 * Flush data cache. This code flushes both TX descriptors to RAM.
	 * After this code, the descriptors will be safely in RAM and we
	 * can start DMA.
	 */
	size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
	addr = (uint32_t)fec->tbd_base;
	flush_dcache_range(addr, addr + size);

	/*
	 * Below we read the DMA descriptor's last four bytes back from the
	 * DRAM. This is important in order to make sure that all WRITE
	 * operations on the bus that were triggered by previous cache FLUSH
	 * have completed.
	 *
	 * Otherwise, on MX28, it is possible to observe a corruption of the
	 * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
	 * for the bus structure of MX28. The scenario is as follows:
	 *
	 * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
	 *    to DRAM due to flush_dcache_range()
	 * 2) ARM core writes the FEC registers via AHB_ARB2
	 * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
	 *
	 * Note that 2) does sometimes finish before 1) due to reordering of
	 * WRITE accesses on the AHB bus, therefore triggering 3) before the
	 * DMA descriptor is fully written into DRAM. This results in
	 * occasional corruption of the DMA descriptor.
	 */
	readl(addr + size - 4);

	/*
	 * Enable SmartDMA transmit task
	 */
	fec_tx_task_enable(fec);

	/*
	 * Wait until frame is sent. On each turn of the wait cycle, we must
	 * invalidate data cache to see what's really in RAM. Also, we need
	 * barrier here.
	 */
	while (--timeout) {
		if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
			break;
	}

	if (!timeout) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The TDAR bit is cleared when the descriptors are all out from TX,
	 * but on mx6solox we noticed that the READY bit is still not cleared
	 * right after TDAR. These are two distinct signals, and in IC
	 * simulation we found that TDAR always gets cleared before the READY
	 * bit of the last BD becomes cleared.
	 * mx6solox uses a later version of the FEC IP, and it looks like this
	 * intrinsic behaviour of the TDAR bit has changed in the newer FEC
	 * version.
	 *
	 * Fix this by polling the READY bit of the BD after the TDAR polling,
	 * which covers the mx6solox case and does not harm the other SoCs.
	 */
	timeout = FEC_XFER_TIMEOUT;
	while (--timeout) {
		invalidate_dcache_range(addr, addr + size);
		if (!(readw(&fec->tbd_base[fec->tbd_index].status) &
		    FEC_TBD_READY))
			break;
	}

	if (!timeout)
		ret = -EINVAL;

out:
	debug("fec_send: status 0x%x index %d ret %i\n",
			readw(&fec->tbd_base[fec->tbd_index].status),
			fec->tbd_index, ret);

	/* for next transmission use the other buffer */
	if (fec->tbd_index)
		fec->tbd_index = 0;
	else
		fec->tbd_index = 1;

	return ret;
}
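The flush-then-readback sequence before fec_tx_task_enable() is the core of the MX28 ordering workaround described in the comment above: the descriptors are cleaned to DRAM, and reading the last word of the just-flushed range back forces that read to complete behind the pending bus writes before the DMA engine is started. Below is a sketch of the same pattern factored into a hypothetical helper; the helper name and the ARCH_DMA_MINALIGN value are illustrative assumptions, flush_dcache_range() is the standard U-Boot cache call the driver already uses, and a 32-bit target is assumed as in the driver itself.

/*
 * Sketch of the "flush, then read back" ordering pattern from fec_send().
 * dma_desc_sync() is a hypothetical helper name, not part of the driver.
 */
#include <stdint.h>

/* Provided by the platform code in U-Boot; declared here for the sketch. */
void flush_dcache_range(unsigned long start, unsigned long stop);

#define ARCH_DMA_MINALIGN	64	/* assumed cacheline size */

#define DMA_ALIGN_UP(x) \
	(((x) + ARCH_DMA_MINALIGN - 1) & ~(ARCH_DMA_MINALIGN - 1))

/*
 * Push a descriptor area from the data cache to DRAM and make sure the
 * resulting bus writes have landed before the caller kicks the DMA engine,
 * by reading the last word of the flushed range back (the MX28 workaround
 * relies on this read being ordered behind the preceding writes).
 */
static inline void dma_desc_sync(volatile void *desc, uint32_t len)
{
	uint32_t start = (uint32_t)desc & ~(ARCH_DMA_MINALIGN - 1);
	uint32_t end = DMA_ALIGN_UP((uint32_t)desc + len);

	flush_dcache_range(start, end);
	(void)*(volatile uint32_t *)(end - 4);	/* readback as a completion check */
}

Used in the driver context, the call before starting the TX task would look like dma_desc_sync(fec->tbd_base, 2 * sizeof(struct fec_bd)), matching the open-coded flush and readl() in fec_send() above.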