/*
 * Service the data-path portion of an SDCC interrupt for the transfer
 * tracked in host->curr: record data errors, latch the DATAEND /
 * DATABLKEND completion flags, and finish the request once both flags
 * have been seen (unless DMA is still busy, in which case the DMA
 * completion handler finishes it).
 *
 * NOTE(review): presumably called from the IRQ handler with host->lock
 * held, like the inline equivalent in msmsdcc_irq -- confirm at the
 * call site.
 */
static void msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
				    void __iomem *base)
{
	struct mmc_data *data = host->curr.data;

	/* No data phase in progress: nothing to do. */
	if (!data)
		return;

	/* Check for data errors */
	if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT | MCI_TXUNDERRUN |
		      MCI_RXOVERRUN)) {
		msmsdcc_data_err(host, data, status);
		host->curr.data_xfered = 0;
		if (host->dma.sg)
			/* DMA active: flush it; the DMA callback cleans up. */
			msm_dmov_stop_cmd(host->dma.channel,
					  &host->dma.hdr, 0);
		else {
			/* PIO: tear down now, then finish or send STOP. */
			msmsdcc_stop_data(host);
			if (!data->stop)
				msmsdcc_request_end(host, data->mrq);
			else
				msmsdcc_start_command(host, data->stop, 0);
		}
	}

	/* Check for data done */
	if (!host->curr.got_dataend && (status & MCI_DATAEND))
		host->curr.got_dataend = 1;

	if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
		host->curr.got_datablkend = 1;

	/*
	 * If DMA is still in progress, we complete via the completion handler
	 */
	if (host->curr.got_dataend && host->curr.got_datablkend &&
	    !host->dma.busy) {
		/*
		 * There appears to be an issue in the controller where
		 * if you request a small block transfer (< fifo size),
		 * you may get your DATAEND/DATABLKEND irq without the
		 * PIO data irq.
		 *
		 * Check to see if there is still data to be read,
		 * and simulate a PIO irq.
		 */
		if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
			msmsdcc_pio_irq(1, host);

		msmsdcc_stop_data(host);
		/* Only credit the full transfer size on a clean finish. */
		if (!data->error)
			host->curr.data_xfered = host->curr.xfer_size;

		if (!data->stop)
			msmsdcc_request_end(host, data->mrq);
		else
			msmsdcc_start_command(host, data->stop, 0);
	}
}
/* * called when a command expires. * Dump some debugging, and then error * out the transaction. */ static void msmsdcc_command_expired(unsigned long _data) { struct msmsdcc_host *host = (struct msmsdcc_host *) _data; struct mmc_request *mrq; unsigned long flags; spin_lock_irqsave(&host->lock, flags); mrq = host->curr.mrq; if (!mrq) { printk(KERN_INFO "%s: Command expiry misfire\n", mmc_hostname(host->mmc)); spin_unlock_irqrestore(&host->lock, flags); return; } printk(KERN_ERR "%s: Command timeout (%p %p %p %p)\n", mmc_hostname(host->mmc), mrq, mrq->cmd, mrq->data, host->dma.sg); mrq->cmd->error = -ETIMEDOUT; msmsdcc_stop_data(host); writel(0, host->base + MMCICOMMAND); host->curr.mrq = NULL; host->curr.cmd = NULL; spin_unlock_irqrestore(&host->lock, flags); mmc_request_done(host->mmc, mrq); }
/*
 * Handle a command-status interrupt: latch the four response words,
 * stop the software command timer, record timeout/CRC errors, then
 * advance the request state machine -- either finish the request or
 * start the (write) data phase.
 */
static void
msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
{
	struct mmc_command *cmd = host->curr.cmd;
	void __iomem *regs = host->base;

	host->curr.cmd = NULL;

	/* Capture the response registers before touching anything else. */
	cmd->resp[0] = readl(regs + MMCIRESPONSE0);
	cmd->resp[1] = readl(regs + MMCIRESPONSE1);
	cmd->resp[2] = readl(regs + MMCIRESPONSE2);
	cmd->resp[3] = readl(regs + MMCIRESPONSE3);

	del_timer(&host->command_timer);

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if ((status & MCI_CMDCRCFAIL) && (cmd->flags & MMC_RSP_CRC)) {
		pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
		cmd->error = -EILSEQ;
	}

	if (cmd->data && !cmd->error) {
		/* Command succeeded and a data phase follows; only write
		 * transfers are started from here. */
		if (!(cmd->data->flags & MMC_DATA_READ))
			msmsdcc_start_data(host, cmd->data);
		return;
	}

	/* No data phase, or the command failed: wind the request down. */
	if (host->curr.data && host->dma.sg) {
		/* DMA in flight: stop it; its callback finishes the request. */
		msm_dmov_stop_cmd(host->dma.channel, &host->dma.hdr, 0);
	} else if (host->curr.data) {
		/* Non DMA */
		msmsdcc_stop_data(host);
		msmsdcc_request_end(host, cmd->mrq);
	} else {
		/* host->data == NULL */
		msmsdcc_request_end(host, cmd->mrq);
	}
}
/** * mmc_wait_for_req - start a request and wait for completion * @host: MMC host to start command * @mrq: MMC request to start * * Start a new MMC custom command request for a host, and wait * for the command to complete. Does not attempt to parse the * response. */ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) { #ifdef CONFIG_WIMAX int ret = 0; #endif DECLARE_COMPLETION_ONSTACK(complete); mrq->done_data = &complete; mrq->done = mmc_wait_done; mmc_start_request(host, mrq); #ifdef CONFIG_WIMAX ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(5000)); if (ret <= 0) { struct msmsdcc_host *msm_host = mmc_priv(host); printk("[ERR] %s: %s wait_for_completion_timeout!\n", __func__, mmc_hostname(host)); msmsdcc_stop_data(msm_host); mrq->cmd->error = -ETIMEDOUT; msmsdcc_request_end(msm_host, mrq); } #else wait_for_completion(&complete); #endif }
/** * mmc_wait_for_req - start a request and wait for completion * @host: MMC host to start command * @mrq: MMC request to start * * Start a new MMC custom command request for a host, and wait * for the command to complete. Does not attempt to parse the * response. */ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq) { #ifdef CONFIG_WIMAX #ifdef CONFIG_WIMAX_MMC_TIMEOUT int ret = 0; struct msmsdcc_host *msm_host = mmc_priv(host); int timeout = 0; #endif #endif DECLARE_COMPLETION_ONSTACK(complete); mrq->done_data = &complete; mrq->done = mmc_wait_done; mmc_start_request(host, mrq); #ifdef CONFIG_WIMAX #ifdef CONFIG_WIMAX_MMC #ifdef CONFIG_WIMAX_MMC_TIMEOUT if ( !(strcmp(mmc_hostname(host), CONFIG_WIMAX_MMC))) { #ifdef CONFIG_WIMAX_REQ_TIMEOUT ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(CONFIG_WIMAX_REQ_TIMEOUT)); timeout = CONFIG_WIMAX_REQ_TIMEOUT; #else ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(5000)); timeout = 5000; #endif if (ret <= 0) { printk("[ERR] %s: %s wait_for_completion_timeout in %d!\n", __func__, mmc_hostname(host), timeout); msmsdcc_reset_and_restore(msm_host); msmsdcc_stop_data(msm_host); mrq->cmd->error = -ETIMEDOUT; msmsdcc_request_end(msm_host, mrq); } } else #endif #endif #endif wait_for_completion_io(&complete); }
/*
 * Top-level SDCC interrupt handler.  Loops reading and acking MMCISTATUS
 * until no enabled status bits remain, handling (in order) data errors,
 * DATAEND/DATABLKEND completion, command completion, and SDIO card
 * interrupts.  Runs entirely under host->lock; the SDIO card interrupt
 * is signalled only after the lock is dropped.
 */
static irqreturn_t
msmsdcc_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;
	int ret = 0;
	int cardint = 0;	/* deferred SDIO card-interrupt flag */

	spin_lock(&host->lock);

	do {
		struct mmc_data *data;

		/* Read raw status, mask to enabled sources, and ack them. */
		status = readl(base + MMCISTATUS);
#if IRQ_DEBUG
		msmsdcc_print_status(host, "irq0-r", status);
#endif
		status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
		writel(status, base + MMCICLEAR);
#if IRQ_DEBUG
		msmsdcc_print_status(host, "irq0-p", status);
#endif

		data = host->curr.data;
		if (data) {
			/* Check for data errors */
			if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|
				      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
				msmsdcc_data_err(host, data, status);
				host->curr.data_xfered = 0;
				if (host->dma.sg)
					/* DMA active: flush; the DMA
					 * callback completes the request. */
					msm_dmov_stop_cmd(host->dma.channel,
							  &host->dma.hdr, 0);
				else {
					msmsdcc_stop_data(host);
					if (!data->stop)
						msmsdcc_request_end(host,
								    data->mrq);
					else
						msmsdcc_start_command(host,
								      data->stop,
								      0);
				}
			}

			/* Check for data done */
			if (!host->curr.got_dataend && (status & MCI_DATAEND))
				host->curr.got_dataend = 1;

			if (!host->curr.got_datablkend &&
			    (status & MCI_DATABLOCKEND)) {
				host->curr.got_datablkend = 1;
			}

			if (host->curr.got_dataend &&
			    host->curr.got_datablkend) {
				/*
				 * If DMA is still in progress, we complete
				 * via the completion handler
				 */
				if (!host->dma.busy) {
					/*
					 * There appears to be an issue in the
					 * controller where if you request a
					 * small block transfer (< fifo size),
					 * you may get your DATAEND/DATABLKEND
					 * irq without the PIO data irq.
					 *
					 * Check to see if theres still data
					 * to be read, and simulate a PIO irq.
					 */
					if (readl(base + MMCISTATUS) &
					    MCI_RXDATAAVLBL)
						msmsdcc_pio_irq(1, host);

					msmsdcc_stop_data(host);
					/* Credit full size only on success. */
					if (!data->error)
						host->curr.data_xfered =
							host->curr.xfer_size;

					if (!data->stop)
						msmsdcc_request_end(host,
								    data->mrq);
					else
						msmsdcc_start_command(host,
								      data->stop,
								      0);
				}
			}
		}

		/* Command phase completed (sent/response/CRC/timeout)? */
		if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
			      MCI_CMDTIMEOUT) && host->curr.cmd) {
			msmsdcc_do_cmdirq(host, status);
		}

		if (status & MCI_SDIOINTOPER) {
			/* Defer: signalling re-enters the driver (below). */
			cardint = 1;
			status &= ~MCI_SDIOINTOPER;
		}
		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	/*
	 * We have to delay handling the card interrupt as it calls
	 * back into the driver.
	 */
	if (cardint) {
		mmc_signal_sdio_irq(host->mmc);
	}

	return IRQ_RETVAL(ret);
}
/*
 * DataMover (DMA) completion callback for an SDCC data transfer.
 * Validates the DMA result, unmaps the scatterlist, flushes user pages
 * for reads into user memory, and -- if DATAEND/DATABLKEND have already
 * been seen or the transfer errored -- completes the request (or issues
 * the STOP command).  Runs under host->lock taken here; mmc_request_done
 * is called only after the lock is dropped.
 */
static void
msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
			  unsigned int result,
			  struct msm_dmov_errdata *err)
{
	struct msmsdcc_dma_data *dma_data =
		container_of(cmd, struct msmsdcc_dma_data, hdr);
	struct msmsdcc_host *host = dma_data->host;
	unsigned long flags;
	struct mmc_request *mrq;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;
	BUG_ON(!mrq);

	if (!(result & DMOV_RSLT_VALID)) {
		printk(KERN_ERR "msmsdcc: Invalid DataMover result\n");
		goto out;
	}

	if (result & DMOV_RSLT_DONE) {
		host->curr.data_xfered = host->curr.xfer_size;
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR)
			printk(KERN_ERR "%s: DMA error (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (result & DMOV_RSLT_FLUSH)
			printk(KERN_ERR "%s: DMA channel flushed (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (err)
			printk(KERN_ERR
			       "Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
			       err->flush[0], err->flush[1], err->flush[2],
			       err->flush[3], err->flush[4], err->flush[5]);
		/* Preserve any earlier, more specific error code. */
		if (!mrq->data->error)
			mrq->data->error = -EIO;
	}
	host->dma.busy = 0;
	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
		     host->dma.dir);

	if (host->curr.user_pages) {
		/* Data landed in user pages: keep dcache coherent. */
		struct scatterlist *sg = host->dma.sg;
		int i;

		for (i = 0; i < host->dma.num_ents; i++, sg++)
			flush_dcache_page(sg_page(sg));
	}

	host->dma.sg = NULL;

	if ((host->curr.got_dataend && host->curr.got_datablkend)
	    || mrq->data->error) {
		/*
		 * If we've already gotten our DATAEND / DATABLKEND
		 * for this request, then complete it through here.
		 */
		msmsdcc_stop_data(host);

		if (!mrq->data->error)
			host->curr.data_xfered = host->curr.xfer_size;
		if (!mrq->data->stop || mrq->cmd->error) {
			/* No STOP needed (or command already failed):
			 * finish the whole request now. */
			writel(0, host->base + MMCICOMMAND);
			host->curr.mrq = NULL;
			host->curr.cmd = NULL;
			mrq->data->bytes_xfered = host->curr.data_xfered;

			spin_unlock_irqrestore(&host->lock, flags);
			/* Completion callback runs without the lock. */
			mmc_request_done(host->mmc, mrq);
			return;
		} else
			msmsdcc_start_command(host, mrq->data->stop, 0);
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	return;
}
/*
 * Top-level SDCC interrupt handler (dummy-52 / prog-done capable
 * variant).  Loops reading and acking MMCISTATUS until no enabled bits
 * remain, handling the dummy CMD52 state machine, SDIO interrupts,
 * command completion (including busy/PROGDONE tracking) and the data
 * path.  'timer' is set when msmsdcc_request_end reports that a delay
 * is needed before the next status read.
 */
static irqreturn_t
msmsdcc_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;
	int ret = 0;
	int timer = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		/* A previous iteration asked for a settle delay. */
		if (timer) {
			timer = 0;
			msmsdcc_delay(host);
		}

		/* Read raw status, mask to enabled sources, and ack them. */
		status = readl(host->base + MMCISTATUS);
#if IRQ_DEBUG
		msmsdcc_print_status(host, "irq0-r", status);
#endif
		status &= (readl(host->base + MMCIMASK0) |
			   MCI_DATABLOCKENDMASK);
		writel(status, host->base + MMCICLEAR);
#if IRQ_DEBUG
		msmsdcc_print_status(host, "irq0-p", status);
#endif

		/*
		 * If a dummy CMD52 was sent (platform workaround), wait for
		 * its PROGDONE and then (re)start the real request outside
		 * the lock.  Any other interrupt while in this state is
		 * ignored for this pass.
		 */
		if ((host->plat->dummy52_required) &&
		    (host->dummy_52_state == DUMMY_52_STATE_SENT)) {
			if (status & MCI_PROGDONE) {
				host->dummy_52_state = DUMMY_52_STATE_NONE;
				host->curr.cmd = NULL;
				spin_unlock(&host->lock);
				msmsdcc_request_start(host, host->curr.mrq);
				return IRQ_HANDLED;
			}
			break;
		}

		data = host->curr.data;
#ifdef CONFIG_MMC_MSM_SDIO_SUPPORT
		if (status & MCI_SDIOINTROPE)
			mmc_signal_sdio_irq(host->mmc);
#endif
		/*
		 * Check for proper command response
		 */
		cmd = host->curr.cmd;
		if ((status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
			       MCI_CMDTIMEOUT | MCI_PROGDONE)) && cmd) {
			host->curr.cmd = NULL;
			/* Latch all four response words. */
			cmd->resp[0] = readl(base + MMCIRESPONSE0);
			cmd->resp[1] = readl(base + MMCIRESPONSE1);
			cmd->resp[2] = readl(base + MMCIRESPONSE2);
			cmd->resp[3] = readl(base + MMCIRESPONSE3);

			if (status & MCI_CMDTIMEOUT) {
#if VERBOSE_COMMAND_TIMEOUTS
				pr_err("%s: Command timeout\n",
				       mmc_hostname(host->mmc));
#endif
				cmd->error = -ETIMEDOUT;
			} else if (status & MCI_CMDCRCFAIL &&
				   cmd->flags & MMC_RSP_CRC) {
				pr_err("%s: Command CRC error\n",
				       mmc_hostname(host->mmc));
				cmd->error = -EILSEQ;
			}

			if (!cmd->data || cmd->error) {
				if (host->curr.data && host->dma.sg)
					/* DMA callback completes the request */
					msm_dmov_stop_cmd(host->dma.channel,
							  &host->dma.hdr, 0);
				else if (host->curr.data) {
					/* Non DMA */
					msmsdcc_stop_data(host);
					timer |= msmsdcc_request_end(host,
								     cmd->mrq);
				} else { /* host->data == NULL */
					/*
					 * Programming (busy) phase: only end
					 * the request once PROGDONE arrives;
					 * otherwise re-arm host->curr.cmd to
					 * wait for it.
					 */
					if (!cmd->error &&
					    host->prog_enable) {
						if (status & MCI_PROGDONE) {
							host->prog_scan = 0;
							host->prog_enable = 0;
							timer |=
							 msmsdcc_request_end(
								host, cmd->mrq);
						} else
							host->curr.cmd = cmd;
					} else {
						if (host->prog_enable) {
							host->prog_scan = 0;
							host->prog_enable = 0;
						}
						timer |= msmsdcc_request_end(
							host, cmd->mrq);
					}
				}
			} else if (cmd->data) {
				/* Writes start here; reads do not. */
				if (!(cmd->data->flags & MMC_DATA_READ))
					msmsdcc_start_data(host, cmd->data,
							   NULL, 0);
			}
		}

		if (data) {
			/* Check for data errors */
			if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|
				      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
				msmsdcc_data_err(host, data, status);
				host->curr.data_xfered = 0;
				if (host->dma.sg)
					msm_dmov_stop_cmd(host->dma.channel,
							  &host->dma.hdr, 0);
				else {
					/* PIO error: reset controller state
					 * before tearing down. */
					msmsdcc_reset_and_restore(host);
					if (host->curr.data)
						msmsdcc_stop_data(host);
					if (!data->stop)
						timer |=
						 msmsdcc_request_end(host,
								     data->mrq);
					else {
						msmsdcc_start_command(host,
								      data->stop,
								      0);
						timer = 1;
					}
				}
			}

			/* Check for data done */
			if (!host->curr.got_dataend && (status & MCI_DATAEND))
				host->curr.got_dataend = 1;

			if (!host->curr.got_datablkend &&
			    (status & MCI_DATABLOCKEND)) {
				host->curr.got_datablkend = 1;
			}

			if (host->curr.got_dataend &&
			    host->curr.got_datablkend) {
				/*
				 * If DMA is still in progress, we complete
				 * via the completion handler
				 */
				if (!host->dma.busy) {
					/*
					 * There appears to be an issue in the
					 * controller where if you request a
					 * small block transfer (< fifo size),
					 * you may get your DATAEND/DATABLKEND
					 * irq without the PIO data irq.
					 *
					 * Check to see if theres still data
					 * to be read, and simulate a PIO irq.
					 */
					if (readl(host->base + MMCISTATUS) &
					    MCI_RXDATAAVLBL)
						msmsdcc_pio_irq(1, host);

					msmsdcc_stop_data(host);
					if (!data->error)
						host->curr.data_xfered =
							host->curr.xfer_size;

					if (!data->stop)
						timer |= msmsdcc_request_end(
							host, data->mrq);
					else {
#if defined (CONFIG_MACH_ACER_A1)
						/* Board quirk: DAT0 low means
						 * the card is still busy --
						 * spin until it releases. */
						while ((host->pdev_id == 1) &&
						       (data->flags &
							MMC_DATA_WRITE) &&
						       (gpio_get_value(
							 SDCC1_DATA_0) == 0)) {
							udelay(5);
						}
#endif
						msmsdcc_start_command(host,
								      data->stop,
								      0);
						timer = 1;
					}
				}
			}
		}

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
/*
 * Tasklet that finishes an SDCC DMA transfer after the DataMover
 * callback has stashed its result in host->dma.result/err.  Validates
 * the result, unmaps the scatterlist, flushes user pages, and -- if
 * DATAEND/DATABLKEND were already seen or the transfer errored --
 * completes the request or issues the STOP command.  Runs under
 * host->lock taken here; mmc_request_done is called after unlocking.
 */
static void
msmsdcc_dma_complete_tlet(unsigned long data)
{
	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
	unsigned long flags;
	struct mmc_request *mrq;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;
	BUG_ON(!mrq);

	if (!(host->dma.result & DMOV_RSLT_VALID)) {
		pr_err("msmsdcc: Invalid DataMover result\n");
		goto out;
	}

	if (host->dma.result & DMOV_RSLT_DONE) {
		host->curr.data_xfered = host->curr.xfer_size;
	} else {
		/* Error or flush */
		if (host->dma.result & DMOV_RSLT_ERROR)
			pr_err("%s: DMA error (0x%.8x)\n",
			       mmc_hostname(host->mmc), host->dma.result);
		if (host->dma.result & DMOV_RSLT_FLUSH)
			pr_err("%s: DMA channel flushed (0x%.8x)\n",
			       mmc_hostname(host->mmc), host->dma.result);
		if (host->dma.err) {
			pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
			       host->dma.err->flush[0],
			       host->dma.err->flush[1],
			       host->dma.err->flush[2],
			       host->dma.err->flush[3],
			       host->dma.err->flush[4],
			       host->dma.err->flush[5]);
			/* Recover the controller after a flush error. */
			msmsdcc_reset_and_restore(host);
		}
		/* Preserve any earlier, more specific error code. */
		if (!mrq->data->error)
			mrq->data->error = -EIO;
	}
	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
		     host->dma.dir);

	if (host->curr.user_pages) {
		/* Data landed in user pages: keep dcache coherent. */
		struct scatterlist *sg = host->dma.sg;
		int i;

		for (i = 0; i < host->dma.num_ents; i++, sg++)
			flush_dcache_page(sg_page(sg));
	}

	host->dma.sg = NULL;
	host->dma.busy = 0;

	if ((host->curr.got_dataend && host->curr.got_datablkend)
	    || mrq->data->error) {
		if (mrq->data->error &&
		    !(host->curr.got_dataend &&
		      host->curr.got_datablkend)) {
			pr_info("%s: Worked around bug 1535304\n",
				mmc_hostname(host->mmc));
		}
		/*
		 * If we've already gotten our DATAEND / DATABLKEND
		 * for this request, then complete it through here.
		 */
		msmsdcc_stop_data(host);

		if (!mrq->data->error)
			host->curr.data_xfered = host->curr.xfer_size;
		if (!mrq->data->stop || mrq->cmd->error) {
			/* No STOP needed (or command already failed):
			 * finish the whole request now. */
			host->curr.mrq = NULL;
			host->curr.cmd = NULL;
			mrq->data->bytes_xfered = host->curr.data_xfered;

			spin_unlock_irqrestore(&host->lock, flags);

#ifdef CONFIG_MMC_MSM_PROG_DONE_SCAN
			if ((mrq->cmd->opcode == SD_IO_RW_EXTENDED) &&
			    (mrq->cmd->arg & 0x80000000)) {
				/* set the prog_scan in a cmd53.*/
				host->prog_scan = 1;
				/* Send STOP to let the SDCC know to stop. */
				writel(MCI_CSPM_MCIABORT,
				       host->base + MMCICOMMAND);
			}
#endif
			/* Completion callback runs without the lock. */
			mmc_request_done(host->mmc, mrq);
			return;
		} else {
#if defined (CONFIG_MACH_ACER_A1)
			/* Board quirk: DAT0 low means the card is still
			 * busy -- spin until it releases. */
			while ((host->pdev_id == 1) &&
			       (mrq->data->flags & MMC_DATA_WRITE) &&
			       (gpio_get_value(SDCC1_DATA_0) == 0)) {
				udelay(5);
			}
#endif
			msmsdcc_start_command(host, mrq->data->stop, 0);
		}
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	return;
}