/*
 * Perform CPU-cache maintenance on [addr, addr + size) so a DMA transfer
 * in the given direction sees / produces coherent data.
 */
static inline void __dma_sync_mips(unsigned long addr, size_t size,
				   enum dma_data_direction direction)
{
	unsigned long begin = addr;
	unsigned long end = addr + size;

	switch (direction) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		/* Write dirty lines back so the device reads fresh data. */
		dma_flush_range(begin, end);
		break;
	case DMA_FROM_DEVICE:
		/* Discard stale lines before the device writes the buffer. */
		dma_inv_range(begin, end);
		break;
	default:
		BUG();
	}
}
static int at91_ether_send(struct eth_device *edev, void *packet, int length) { while (!(at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ)); dma_flush_range((ulong) packet, (ulong)packet + length); /* Set address of the data in the Transmit Address register */ at91_emac_write(AT91_EMAC_TAR, (unsigned long) packet); /* Set length of the packet in the Transmit Control register */ at91_emac_write(AT91_EMAC_TCR, length); while (at91_emac_read(AT91_EMAC_TCR) & 0x7ff); at91_emac_write(AT91_EMAC_TSR, at91_emac_read(AT91_EMAC_TSR) | AT91_EMAC_TSR_COMP); return 0; }
/*
 * This function sends a single packet on the network
 * and returns 0 on successful transmit or negative for error
 */
static int davinci_emac_send(struct eth_device *edev, void *packet, int length)
{
	struct davinci_emac_priv *priv = edev->priv;
	uint64_t start;
	int ret_status;

	dev_dbg(priv->dev, "+ emac_send (length %d)\n", length);

	/* Check packet size and if < EMAC_MIN_ETHERNET_PKT_SIZE, pad it up */
	/* NOTE(review): the buffer is not zero-padded before the length is
	 * bumped, so trailing bytes past the caller's data are whatever
	 * follows the packet in memory — presumably acceptable here, but
	 * worth confirming against the caller's buffer sizing. */
	if (length < EMAC_MIN_ETHERNET_PKT_SIZE) {
		length = EMAC_MIN_ETHERNET_PKT_SIZE;
	}

	/* Populate the TX descriptor: single-buffer frame, so the packet is
	 * both start-of-packet (SOP) and end-of-packet (EOP), and ownership
	 * is handed to the EMAC via EMAC_CPPI_OWNERSHIP_BIT. */
	writel(0, priv->emac_tx_desc + EMAC_DESC_NEXT);
	writel((uint8_t *) packet, priv->emac_tx_desc + EMAC_DESC_BUFFER);
	writel((length & 0xffff), priv->emac_tx_desc + EMAC_DESC_BUFF_OFF_LEN);
	writel(((length & 0xffff) | EMAC_CPPI_SOP_BIT | EMAC_CPPI_OWNERSHIP_BIT |
		EMAC_CPPI_EOP_BIT), priv->emac_tx_desc + EMAC_DESC_PKT_FLAG_LEN);

	/* Write the frame data back to memory before DMA reads it. */
	dma_flush_range((ulong) packet, (ulong)packet + length);

	/* Send the packet: writing the descriptor address to the TX0 head
	 * descriptor pointer starts the channel. */
	writel(BD_TO_HW(priv->emac_tx_desc), priv->adap_emac + EMAC_TX0HDP);

	/* Wait for packet to complete or link down */
	start = get_time_ns();
	while (1) {
		if (readl(priv->adap_emac + EMAC_TXINTSTATRAW) & 0x01) {
			/* Acknowledge the TX descriptor: writing it to the
			 * completion pointer clears the raw interrupt. */
			writel(BD_TO_HW(priv->emac_tx_desc), priv->adap_emac + EMAC_TX0CP);
			ret_status = 0;
			break;
		}
		if (is_timeout(start, 100 * MSECOND)) {
			ret_status = -ETIMEDOUT;
			break;
		}
	}

	dev_dbg(priv->dev, "- emac_send (ret_status %i)\n", ret_status);

	return ret_status;
}
/*
 * Queue one frame on the current DesignWare MAC TX descriptor and kick
 * the DMA engine. Returns 0 on success, -1 if the descriptor is still
 * owned by the DMA (previous transmit not yet completed).
 */
static int dwc_ether_send(struct eth_device *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev->priv;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		dev_err(&dev->dev, "CPU not owner of tx frame\n");
		return -1;
	}

	/* Copy into the descriptor's bounce buffer, then write it back to
	 * memory so the DMA engine reads current data. */
	memcpy((void *)desc_p->dmamac_addr, packet, length);
	dma_flush_range((unsigned long)desc_p->dmamac_addr,
		(unsigned long)desc_p->dmamac_addr + length);

#if defined(CONFIG_DRIVER_NET_DESIGNWARE_ALTDESCRIPTOR)
	/* Alternate (enhanced) descriptor layout: first/last-segment flags
	 * live in the status word; set OWN last so the DMA only sees a
	 * fully populated descriptor. */
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl |= (length << DESC_TXCTRL_SIZE1SHFT) & \
			       DESC_TXCTRL_SIZE1MASK;

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	/* Normal descriptor layout: first/last-segment flags live in the
	 * control word; writing OWN to the status word hands the
	 * descriptor to the DMA. */
	desc_p->dmamac_cntl |= ((length << DESC_TXCTRL_SIZE1SHFT) & \
			       DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST | \
			       DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission: any write wakes the TX DMA poll engine. */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}
static int arc_emac_send(struct eth_device *edev, void *data, int length) { struct arc_emac_priv *priv = edev->priv; struct arc_emac_bd *bd = &priv->txbd[priv->txbd_curr]; char txbuf[EMAC_ZLEN]; int ret; /* Pad short frames to minimum length */ if (length < EMAC_ZLEN) { memcpy(txbuf, data, length); memset(txbuf + length, 0, EMAC_ZLEN - length); data = txbuf; length = EMAC_ZLEN; } dma_flush_range((unsigned long)data, (unsigned long)data + length); bd->data = cpu_to_le32(data); bd->info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | length); arc_reg_set(priv, R_STATUS, TXPL_MASK); ret = wait_on_timeout(20 * MSECOND, (arc_reg_get(priv, R_STATUS) & TXINT_MASK) != 0); if (ret) { dev_err(&edev->dev, "transmit timeout\n"); return ret; } arc_reg_set(priv, R_STATUS, TXINT_MASK); priv->txbd_curr++; priv->txbd_curr %= TX_BD_NUM; return 0; }
static int dwmci_cmd(struct mci_host *mci, struct mci_cmd *cmd, struct mci_data *data) { struct dwmci_host *host = to_dwmci_host(mci); int flags = 0; uint32_t mask, ctrl; uint64_t start; int ret; unsigned int num_bytes = 0; const void *writebuf = NULL; start = get_time_ns(); while (1) { if (!(dwmci_readl(host, DWMCI_STATUS) & DWMCI_STATUS_BUSY)) break; if (is_timeout(start, 100 * MSECOND)) { dev_dbg(host->dev, "Timeout on data busy\n"); return -ETIMEDOUT; } } dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL); if (data) { num_bytes = data->blocks * data->blocksize; if (data->flags & MMC_DATA_WRITE) { dma_flush_range((unsigned long)data->src, (unsigned long)(data->src + data->blocks * 512)); writebuf = data->src; } ret = dwmci_prepare_data(host, data); if (ret) return ret; } dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg); if (data) flags = dwmci_set_transfer_mode(host, data); if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY)) return -EINVAL; if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION) flags |= DWMCI_CMD_ABORT_STOP; else flags |= DWMCI_CMD_PRV_DAT_WAIT; if (cmd->resp_type & MMC_RSP_PRESENT) { flags |= DWMCI_CMD_RESP_EXP; if (cmd->resp_type & MMC_RSP_136) flags |= DWMCI_CMD_RESP_LENGTH; } if (cmd->resp_type & MMC_RSP_CRC) flags |= DWMCI_CMD_CHECK_CRC; flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG); dev_dbg(host->dev, "Sending CMD%d\n", cmd->cmdidx); dwmci_writel(host, DWMCI_CMD, flags); start = get_time_ns(); while (1) { mask = dwmci_readl(host, DWMCI_RINTSTS); if (mask & DWMCI_INTMSK_CDONE) { if (!data) dwmci_writel(host, DWMCI_RINTSTS, mask); break; } if (is_timeout(start, 100 * MSECOND)) return -ETIMEDOUT; } if (mask & DWMCI_INTMSK_RTO) { dev_dbg(host->dev, "Response Timeout..\n"); return -ETIMEDOUT; } else if (mask & DWMCI_INTMSK_RE) { dev_dbg(host->dev, "Response Error..\n"); return -EIO; } if (cmd->resp_type & MMC_RSP_PRESENT) { if (cmd->resp_type & MMC_RSP_136) { cmd->response[0] = dwmci_readl(host, DWMCI_RESP3); 
cmd->response[1] = dwmci_readl(host, DWMCI_RESP2); cmd->response[2] = dwmci_readl(host, DWMCI_RESP1); cmd->response[3] = dwmci_readl(host, DWMCI_RESP0); } else { cmd->response[0] = dwmci_readl(host, DWMCI_RESP0); } } if (data) { start = get_time_ns(); do { mask = dwmci_readl(host, DWMCI_RINTSTS); if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) { dev_dbg(host->dev, "DATA ERROR!\n"); return -EIO; } if (is_timeout(start, SECOND)) return -ETIMEDOUT; } while (!(mask & DWMCI_INTMSK_DTO)); dwmci_writel(host, DWMCI_RINTSTS, mask); ctrl = dwmci_readl(host, DWMCI_CTRL); ctrl &= ~(DWMCI_DMA_EN); dwmci_writel(host, DWMCI_CTRL, ctrl); if (data->flags & MMC_DATA_READ) { dma_inv_range((unsigned long)data->dest, (unsigned long)(data->dest + data->blocks * 512)); } } udelay(100); return 0; }