/*
 * msmsdcc_handle_irq_data() - service the data-path bits of an SDCC
 * interrupt status word.
 * @host:   controller instance. NOTE(review): assumed host->lock is held
 *          by the caller (curr.* state and registers are touched without
 *          locking here) — confirm against the irq entry point.
 * @status: MMCISTATUS bits already read/cleared by the caller.
 * @base:   ioremapped register base, re-read for the small-transfer quirk.
 *
 * No-op when no data transfer is currently in flight.
 */
static void msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status, void __iomem *base)
{
	struct mmc_data *data = host->curr.data;

	if (!data)
		return;

	/* Check for data errors */
	if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT | MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
		msmsdcc_data_err(host, data, status);
		host->curr.data_xfered = 0;
		if (host->dma.sg)
			/* DMA in flight: flush the channel; the DMA completion
			 * callback is responsible for finishing the request. */
			msm_dmov_stop_cmd(host->dma.channel, &host->dma.hdr, 0);
		else {
			/* PIO path: tear down data state and either finish the
			 * request or issue its STOP command. */
			msmsdcc_stop_data(host);
			if (!data->stop)
				msmsdcc_request_end(host, data->mrq);
			else
				msmsdcc_start_command(host, data->stop, 0);
			/*
			 * NOTE(review): control falls through to the
			 * DATAEND/DATABLKEND handling below even though the
			 * request may already have been completed here —
			 * verify this cannot double-complete when the error
			 * status also carries the done bits.
			 */
		}
	}

	/* Check for data done */
	if (!host->curr.got_dataend && (status & MCI_DATAEND))
		host->curr.got_dataend = 1;

	if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
		host->curr.got_datablkend = 1;

	/*
	 * If DMA is still in progress, we complete via the completion handler
	 */
	if (host->curr.got_dataend && host->curr.got_datablkend && !host->dma.busy) {
		/*
		 * There appears to be an issue in the controller where
		 * if you request a small block transfer (< fifo size),
		 * you may get your DATAEND/DATABLKEND irq without the
		 * PIO data irq.
		 *
		 * Check to see if there is still data to be read,
		 * and simulate a PIO irq.
		 */
		if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
			msmsdcc_pio_irq(1, host);

		msmsdcc_stop_data(host);
		if (!data->error)
			host->curr.data_xfered = host->curr.xfer_size;

		/* Finish now, or chain the request's STOP command first. */
		if (!data->stop)
			msmsdcc_request_end(host, data->mrq);
		else
			msmsdcc_start_command(host, data->stop, 0);
	}
}
static void msmsdcc_request_start(struct msmsdcc_host *host, struct mmc_request *mrq) { if (mrq->data && mrq->data->flags & MMC_DATA_READ) { /* Queue/read data, daisy-chain command when data starts */ msmsdcc_start_data(host, mrq->data, mrq->cmd, 0); } else { msmsdcc_start_command(host, mrq->cmd, 0); } }
/*
 * msmsdcc_request() - mmc_host_ops .request entry point (dummy-CMD52
 * variant).
 *
 * Fails the request with -ENOMEDIUM when the card has been ejected
 * (silently "succeeding" writes so upper layers don't retry them), and on
 * platforms that need it interposes a dummy CMD52 before data requests.
 * mmc_request_done() is always called with host->lock dropped.
 */
static void msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	unsigned long flags;

	/* A request while one is outstanding, or with power off, is a bug
	 * in the caller or in our power sequencing. */
	WARN_ON(host->curr.mrq != NULL);
	WARN_ON(host->pwr == 0);

#if defined (CONFIG_MACH_ACER_A1)
	/* NOTE(review): board-specific global flag set on CMD25
	 * (multi-block write); consumer not visible in this chunk. */
	if (mrq->cmd->opcode == 25)
		g_IS_SD_CMD25 = 1;
#endif

	spin_lock_irqsave(&host->lock, flags);

	if (host->eject) {
		/* Card gone: pretend writes completed, fail everything else. */
		if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
			mrq->cmd->error = 0;
			mrq->data->bytes_xfered = mrq->data->blksz * mrq->data->blocks;
		} else
			mrq->cmd->error = -ENOMEDIUM;

		spin_unlock_irqrestore(&host->lock, flags);
		mmc_request_done(mmc, mrq);
		return;
	}

	host->curr.mrq = mrq;

	if (host->plat->dummy52_required) {
		if (host->dummy_52_needed) {
			if (mrq->data) {
				/* Send the dummy CMD52 first; the real request
				 * is restarted from the irq handler once
				 * PROGDONE arrives. */
				host->dummy_52_state = DUMMY_52_STATE_SENT;
				msmsdcc_start_command(host, &dummy52cmd, MCI_CPSM_PROGENA);
				spin_unlock_irqrestore(&host->lock, flags);
				return;
			}
			host->dummy_52_needed = 0;
		}
		/* Arm the dummy-52 for the request after a CMD53 data xfer. */
		if ((mrq->cmd->opcode == SD_IO_RW_EXTENDED) && (mrq->data))
			host->dummy_52_needed = 1;
	}
	msmsdcc_request_start(host, mrq);

	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * msmsdcc_request() - mmc_host_ops .request entry point (command-polling
 * variant).
 *
 * After issuing the command it optionally busy-polls for the response
 * (cmdpoll) to avoid interrupt latency on fast commands; on a poll miss
 * it falls back to the command_timer as a watchdog.
 */
static void msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->curr.mrq != NULL);
	WARN_ON(host->pwr == 0);

	spin_lock_irqsave(&host->lock, flags);

	host->stats.reqs++;

	if (host->eject) {
		/* Card gone: pretend writes completed, fail everything else. */
		if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
			mrq->cmd->error = 0;
			mrq->data->bytes_xfered = mrq->data->blksz * mrq->data->blocks;
		} else
			mrq->cmd->error = -ENOMEDIUM;

		spin_unlock_irqrestore(&host->lock, flags);
		mmc_request_done(mmc, mrq);
		return;
	}

	host->curr.mrq = mrq;

	/* Reads: arm the data path before the command so data can flow as
	 * soon as the card responds. */
	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		msmsdcc_start_data(host, mrq->data);

	msmsdcc_start_command(host, mrq->cmd, 0);

	if (host->cmdpoll && !msmsdcc_spin_on_status(host,
			MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
			CMD_SPINMAX)) {
		/* Response arrived within the spin budget — handle it inline
		 * and clear the status bits so the irq doesn't re-handle it. */
		uint32_t status = readl(host->base + MMCISTATUS);
		msmsdcc_do_cmdirq(host, status);
		writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
		       host->base + MMCICLEAR);
		host->stats.cmdpoll_hits++;
	} else {
		/* Let the irq path finish it; timer is a lost-irq watchdog. */
		host->stats.cmdpoll_misses++;
		mod_timer(&host->command_timer, jiffies + HZ);
	}
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * msmsdcc_request() - mmc_host_ops .request entry point (watchdog-timer
 * variant).
 *
 * Issues the command (arming the data path first for reads) and starts
 * command_timer as a lost-interrupt watchdog; the BCM432X patch doubles
 * the timeout for that SDIO card.
 */
static void msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct msmsdcc_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->curr.mrq != NULL);
	WARN_ON(host->pwr == 0);

	spin_lock_irqsave(&host->lock, flags);

	if (host->eject) {
		/* Card gone: pretend writes completed, fail everything else. */
		if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
			mrq->cmd->error = 0;
			mrq->data->bytes_xfered = mrq->data->blksz * mrq->data->blocks;
		} else
			mrq->cmd->error = -ENOMEDIUM;

		spin_unlock_irqrestore(&host->lock, flags);
		mmc_request_done(mmc, mrq);
		return;
	}

	host->curr.mrq = mrq;

	/* Reads: arm the data path before the command so data can flow as
	 * soon as the card responds. */
	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		msmsdcc_start_data(host, mrq->data);

	msmsdcc_start_command(host, mrq->cmd, 0);

#ifdef CONFIG_LGE_BCM432X_PATCH
	mod_timer(&host->command_timer, jiffies + 2* HZ);
#else /* CONFIG_LGE_BCM432X_PATCH */
	mod_timer(&host->command_timer, jiffies + HZ);
#endif /* CONFIG_LGE_BCM432X_PATCH */

	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * msmsdcc_irq() - SDCC interrupt handler (SDIO-cardint variant).
 *
 * Loops reading/clearing MMCISTATUS until no enabled bits remain,
 * dispatching data-path events, command completion, and SDIO card
 * interrupts. The SDIO interrupt is only signalled to the core after
 * host->lock is dropped, since mmc_signal_sdio_irq() re-enters the driver.
 */
static irqreturn_t msmsdcc_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;
	int ret = 0;
	int cardint = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_data *data;

		status = readl(base + MMCISTATUS);
#if IRQ_DEBUG
		msmsdcc_print_status(host, "irq0-r", status);
#endif
		/* Only handle (and ack) bits we have enabled, plus BLOCKEND. */
		status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
		writel(status, base + MMCICLEAR);
#if IRQ_DEBUG
		msmsdcc_print_status(host, "irq0-p", status);
#endif

		data = host->curr.data;
		if (data) {
			/* Check for data errors */
			if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|
				      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
				msmsdcc_data_err(host, data, status);
				host->curr.data_xfered = 0;
				if (host->dma.sg)
					/* DMA completion callback finishes the
					 * request after the channel flushes. */
					msm_dmov_stop_cmd(host->dma.channel,
							  &host->dma.hdr, 0);
				else {
					msmsdcc_stop_data(host);
					if (!data->stop)
						msmsdcc_request_end(host, data->mrq);
					else
						msmsdcc_start_command(host, data->stop, 0);
				}
			}

			/* Check for data done */
			if (!host->curr.got_dataend && (status & MCI_DATAEND))
				host->curr.got_dataend = 1;

			if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND)) {
				host->curr.got_datablkend = 1;
			}

			if (host->curr.got_dataend && host->curr.got_datablkend) {
				/*
				 * If DMA is still in progress, we complete
				 * via the completion handler
				 */
				if (!host->dma.busy) {
					/*
					 * There appears to be an issue in the
					 * controller where if you request a
					 * small block transfer (< fifo size),
					 * you may get your DATAEND/DATABLKEND
					 * irq without the PIO data irq.
					 *
					 * Check to see if theres still data
					 * to be read, and simulate a PIO irq.
					 */
					if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
						msmsdcc_pio_irq(1, host);

					msmsdcc_stop_data(host);
					if (!data->error)
						host->curr.data_xfered = host->curr.xfer_size;

					/* Finish, or chain the STOP command. */
					if (!data->stop)
						msmsdcc_request_end(host, data->mrq);
					else
						msmsdcc_start_command(host, data->stop, 0);
				}
			}
		}

		if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
			      MCI_CMDTIMEOUT) && host->curr.cmd) {
			msmsdcc_do_cmdirq(host, status);
		}

		if (status & MCI_SDIOINTOPER) {
			/* Defer to after unlock — see function comment. */
			cardint = 1;
			status &= ~MCI_SDIOINTOPER;
		}
		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	/*
	 * We have to delay handling the card interrupt as it calls
	 * back into the driver.
	 */
	if (cardint) {
		mmc_signal_sdio_irq(host->mmc);
	}

	return IRQ_RETVAL(ret);
}
/*
 * msmsdcc_dma_complete_func() - DataMover completion callback for SDCC
 * DMA transfers.
 * @cmd:    the msm_dmov_cmd embedded in host->dma (recovered via
 *          container_of).
 * @result: DMOV_RSLT_* status word from the DataMover.
 * @err:    optional flush/error detail from the DataMover, may be NULL.
 *
 * Unmaps the scatterlist, records success or -EIO, and — if the
 * DATAEND/DATABLKEND irqs have already been seen (or the transfer
 * errored) — completes the request here, dropping host->lock before
 * calling mmc_request_done().
 */
static void msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd, unsigned int result, struct msm_dmov_errdata *err)
{
	struct msmsdcc_dma_data *dma_data = container_of(cmd, struct msmsdcc_dma_data, hdr);
	struct msmsdcc_host *host = dma_data->host;
	unsigned long flags;
	struct mmc_request *mrq;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;
	BUG_ON(!mrq);

	if (!(result & DMOV_RSLT_VALID)) {
		printk(KERN_ERR "msmsdcc: Invalid DataMover result\n");
		goto out;
	}

	if (result & DMOV_RSLT_DONE) {
		host->curr.data_xfered = host->curr.xfer_size;
	} else {
		/* Error or flush */
		if (result & DMOV_RSLT_ERROR)
			printk(KERN_ERR "%s: DMA error (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (result & DMOV_RSLT_FLUSH)
			printk(KERN_ERR "%s: DMA channel flushed (0x%.8x)\n",
			       mmc_hostname(host->mmc), result);
		if (err)
			printk(KERN_ERR "Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
			       err->flush[0], err->flush[1], err->flush[2],
			       err->flush[3], err->flush[4], err->flush[5]);
		if (!mrq->data->error)
			mrq->data->error = -EIO;
	}
	host->dma.busy = 0;
	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
		     host->dma.dir);

	if (host->curr.user_pages) {
		/* Reads into user pages: make CPU caches see the DMA'd data. */
		struct scatterlist *sg = host->dma.sg;
		int i;

		for (i = 0; i < host->dma.num_ents; i++, sg++)
			flush_dcache_page(sg_page(sg));
	}

	host->dma.sg = NULL;

	if ((host->curr.got_dataend && host->curr.got_datablkend) || mrq->data->error) {
		/*
		 * If we've already gotten our DATAEND / DATABLKEND
		 * for this request, then complete it through here.
		 */
		msmsdcc_stop_data(host);

		if (!mrq->data->error)
			host->curr.data_xfered = host->curr.xfer_size;
		if (!mrq->data->stop || mrq->cmd->error) {
			writel(0, host->base + MMCICOMMAND);
			host->curr.mrq = NULL;
			host->curr.cmd = NULL;
			mrq->data->bytes_xfered = host->curr.data_xfered;

			/* Drop the lock before calling back into the core. */
			spin_unlock_irqrestore(&host->lock, flags);
			mmc_request_done(host->mmc, mrq);
			return;
		} else
			msmsdcc_start_command(host, mrq->data->stop, 0);
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	return;
}
/*
 * msmsdcc_irq() - SDCC interrupt handler (dummy-52 / prog-enable variant).
 *
 * Loops reading/clearing MMCISTATUS until no enabled bits remain. Handles,
 * in order: completion of an interposed dummy CMD52 (restarting the real
 * request), SDIO card interrupts, command response/timeout/CRC, and the
 * data path (errors, DATAEND/DATABLKEND, STOP chaining). "timer" requests
 * an msmsdcc_delay() before the next status read when a helper asked for
 * settling time.
 */
static irqreturn_t msmsdcc_irq(int irq, void *dev_id)
{
	struct msmsdcc_host *host = dev_id;
	void __iomem *base = host->base;
	u32 status;
	int ret = 0;
	int timer = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		if (timer) {
			timer = 0;
			msmsdcc_delay(host);
		}
		status = readl(host->base + MMCISTATUS);
#if IRQ_DEBUG
		msmsdcc_print_status(host, "irq0-r", status);
#endif
		/* Only handle (and ack) bits we have enabled, plus BLOCKEND. */
		status &= (readl(host->base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
		writel(status, host->base + MMCICLEAR);
#if IRQ_DEBUG
		msmsdcc_print_status(host, "irq0-p", status);
#endif

		if ((host->plat->dummy52_required) &&
		    (host->dummy_52_state == DUMMY_52_STATE_SENT)) {
			if (status & MCI_PROGDONE) {
				/* Dummy CMD52 finished — now launch the real
				 * request that was deferred behind it. */
				host->dummy_52_state = DUMMY_52_STATE_NONE;
				host->curr.cmd = NULL;
				spin_unlock(&host->lock);
				msmsdcc_request_start(host, host->curr.mrq);
				return IRQ_HANDLED;
			}
			break;
		}

		data = host->curr.data;
#ifdef CONFIG_MMC_MSM_SDIO_SUPPORT
		if (status & MCI_SDIOINTROPE)
			mmc_signal_sdio_irq(host->mmc);
#endif
		/*
		 * Check for proper command response
		 */
		cmd = host->curr.cmd;
		if ((status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
			       MCI_CMDTIMEOUT | MCI_PROGDONE)) && cmd) {
			host->curr.cmd = NULL;
			cmd->resp[0] = readl(base + MMCIRESPONSE0);
			cmd->resp[1] = readl(base + MMCIRESPONSE1);
			cmd->resp[2] = readl(base + MMCIRESPONSE2);
			cmd->resp[3] = readl(base + MMCIRESPONSE3);

			if (status & MCI_CMDTIMEOUT) {
#if VERBOSE_COMMAND_TIMEOUTS
				pr_err("%s: Command timeout\n",
				       mmc_hostname(host->mmc));
#endif
				cmd->error = -ETIMEDOUT;
			} else if (status & MCI_CMDCRCFAIL &&
				   cmd->flags & MMC_RSP_CRC) {
				pr_err("%s: Command CRC error\n",
				       mmc_hostname(host->mmc));
				cmd->error = -EILSEQ;
			}

			if (!cmd->data || cmd->error) {
				if (host->curr.data && host->dma.sg)
					/* Abort in-flight DMA; its completion
					 * callback finishes the request. */
					msm_dmov_stop_cmd(host->dma.channel,
							  &host->dma.hdr, 0);
				else if (host->curr.data) { /* Non DMA */
					msmsdcc_stop_data(host);
					timer |= msmsdcc_request_end(host, cmd->mrq);
				} else { /* host->data == NULL */
					if (!cmd->error && host->prog_enable) {
						if (status & MCI_PROGDONE) {
							/* Busy-programming done:
							 * the command can now be
							 * completed. */
							host->prog_scan = 0;
							host->prog_enable = 0;
							timer |= msmsdcc_request_end(
								host, cmd->mrq);
						} else
							/* Still busy — keep cmd
							 * pending for PROGDONE. */
							host->curr.cmd = cmd;
					} else {
						if (host->prog_enable) {
							host->prog_scan = 0;
							host->prog_enable = 0;
						}
						timer |= msmsdcc_request_end(
							host, cmd->mrq);
					}
				}
			} else if (cmd->data) {
				/* Writes start data only after the response. */
				if (!(cmd->data->flags & MMC_DATA_READ))
					msmsdcc_start_data(host, cmd->data,
							   NULL, 0);
			}
		}

		if (data) {
			/* Check for data errors */
			if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|
				      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
				msmsdcc_data_err(host, data, status);
				host->curr.data_xfered = 0;
				if (host->dma.sg)
					msm_dmov_stop_cmd(host->dma.channel,
							  &host->dma.hdr, 0);
				else {
					/* Recover the controller before
					 * completing the failed request. */
					msmsdcc_reset_and_restore(host);
					if (host->curr.data)
						msmsdcc_stop_data(host);
					if (!data->stop)
						timer |= msmsdcc_request_end(host, data->mrq);
					else {
						msmsdcc_start_command(host, data->stop, 0);
						timer = 1;
					}
				}
			}

			/* Check for data done */
			if (!host->curr.got_dataend && (status & MCI_DATAEND))
				host->curr.got_dataend = 1;

			if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND)) {
				host->curr.got_datablkend = 1;
			}

			if (host->curr.got_dataend && host->curr.got_datablkend) {
				/*
				 * If DMA is still in progress, we complete
				 * via the completion handler
				 */
				if (!host->dma.busy) {
					/*
					 * There appears to be an issue in the
					 * controller where if you request a
					 * small block transfer (< fifo size),
					 * you may get your DATAEND/DATABLKEND
					 * irq without the PIO data irq.
					 *
					 * Check to see if theres still data
					 * to be read, and simulate a PIO irq.
					 */
					if (readl(host->base + MMCISTATUS) & MCI_RXDATAAVLBL)
						msmsdcc_pio_irq(1, host);

					msmsdcc_stop_data(host);
					if (!data->error)
						host->curr.data_xfered = host->curr.xfer_size;

					if (!data->stop)
						timer |= msmsdcc_request_end(
							host, data->mrq);
					else {
#if defined (CONFIG_MACH_ACER_A1)
						/* Board quirk: wait for DAT0 to
						 * deassert busy before STOP. */
						while ((host->pdev_id == 1) &&
						       (data->flags & MMC_DATA_WRITE) &&
						       (gpio_get_value(SDCC1_DATA_0) == 0)) {
							udelay(5);
						}
#endif
						msmsdcc_start_command(host, data->stop, 0);
						timer = 1;
					}
				}
			}
		}

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}
/*
 * msmsdcc_start_data() - program the SDCC data path for a transfer.
 * @host: controller instance. NOTE(review): assumed host->lock held by
 *        caller — confirm against msmsdcc_request()/irq paths.
 * @data: the mmc_data to transfer.
 * @cmd:  optional command to daisy-chain with the data (may be NULL).
 * @c:    MCI_CPSM_* flags for the chained command.
 *
 * Prefers DMA via the DataMover (parameters stashed for the exec
 * callback); falls back to PIO with FIFO-threshold interrupts otherwise.
 */
static void msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data, struct mmc_command *cmd, u32 c)
{
	unsigned int datactrl, timeout;
	unsigned long long clks;
	void __iomem *base = host->base;
	unsigned int pio_irqmask = 0;

	/* Reset per-transfer bookkeeping. */
	host->curr.data = data;
	host->curr.xfer_size = data->blksz * data->blocks;
	host->curr.xfer_remain = host->curr.xfer_size;
	host->curr.data_xfered = 0;
	host->curr.got_dataend = 0;
	host->curr.got_datablkend = 0;

	memset(&host->pio, 0, sizeof(host->pio));

	datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);

	if (!msmsdcc_config_dma(host, data))
		datactrl |= MCI_DPSM_DMAENABLE;
	else {
		/* DMA not possible for this sg list — set up PIO state. */
		host->pio.sg = data->sg;
		host->pio.sg_len = data->sg_len;
		host->pio.sg_off = 0;

		if (data->flags & MMC_DATA_READ) {
			pio_irqmask = MCI_RXFIFOHALFFULLMASK;
			/* Sub-FIFO transfers never reach half-full; also irq
			 * on any data available. */
			if (host->curr.xfer_remain < MCI_FIFOSIZE)
				pio_irqmask |= MCI_RXDATAAVLBLMASK;
		} else
			pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* Data timeout in controller clocks: timeout_ns * clk_rate (the *2
	 * adds margin — NOTE(review): factor looks empirical; confirm). */
	clks = (unsigned long long)data->timeout_ns * host->clk_rate;
	do_div(clks, 1000000000UL);
	timeout = data->timeout_clks + (unsigned int)clks*2;
#if defined (CONFIG_MACH_ACER_A1)
	timeout *= 10;
#endif

	if (datactrl & MCI_DPSM_DMAENABLE) {
		/* Save parameters for the exec function */
		host->cmd_timeout = timeout;
		host->cmd_pio_irqmask = pio_irqmask;
		host->cmd_datactrl = datactrl;
		host->cmd_cmd = cmd;

		host->dma.hdr.exec_func = msmsdcc_dma_exec_func;
		host->dma.hdr.user = (void *)host;
		host->dma.busy = 1;

		if (cmd) {
			msmsdcc_start_command_deferred(host, cmd, &c);
			host->cmd_c = c;
		}
		/* Ensure descriptor writes are visible before enqueueing. */
		dsb();
		msm_dmov_enqueue_cmd_ext(host->dma.channel, &host->dma.hdr);
		if (data->flags & MMC_DATA_WRITE)
			host->prog_scan = 1;
	} else {
		writel(timeout, base + MMCIDATATIMER);

		writel(host->curr.xfer_size, base + MMCIDATALENGTH);

		writel(pio_irqmask, base + MMCIMASK1);
		msmsdcc_delay(host);	/* Allow parms to be applied */
		writel(datactrl, base + MMCIDATACTRL);

		if (cmd) {
			msmsdcc_delay(host); /* Delay between data/command */
			/* Daisy-chain the command if requested */
			msmsdcc_start_command(host, cmd, c);
		}
	}
}
/*
 * msmsdcc_dma_complete_tlet() - tasklet half of DMA completion.
 * @data: the msmsdcc_host pointer, cast through the tasklet's
 *        unsigned long argument.
 *
 * Reads the DataMover result stashed in host->dma, unmaps the
 * scatterlist, records success or -EIO (resetting the controller on a
 * flush with error detail), and — if DATAEND/DATABLKEND were already
 * seen or the transfer errored — completes the request, dropping
 * host->lock before mmc_request_done().
 */
static void msmsdcc_dma_complete_tlet(unsigned long data)
{
	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
	unsigned long flags;
	struct mmc_request *mrq;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->curr.mrq;
	BUG_ON(!mrq);

	if (!(host->dma.result & DMOV_RSLT_VALID)) {
		pr_err("msmsdcc: Invalid DataMover result\n");
		goto out;
	}

	if (host->dma.result & DMOV_RSLT_DONE) {
		host->curr.data_xfered = host->curr.xfer_size;
	} else {
		/* Error or flush */
		if (host->dma.result & DMOV_RSLT_ERROR)
			pr_err("%s: DMA error (0x%.8x)\n",
			       mmc_hostname(host->mmc), host->dma.result);
		if (host->dma.result & DMOV_RSLT_FLUSH)
			pr_err("%s: DMA channel flushed (0x%.8x)\n",
			       mmc_hostname(host->mmc), host->dma.result);
		if (host->dma.err) {
			pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
			       host->dma.err->flush[0], host->dma.err->flush[1],
			       host->dma.err->flush[2], host->dma.err->flush[3],
			       host->dma.err->flush[4], host->dma.err->flush[5]);
			msmsdcc_reset_and_restore(host);
		}
		if (!mrq->data->error)
			mrq->data->error = -EIO;
	}
	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
		     host->dma.dir);

	if (host->curr.user_pages) {
		/* Reads into user pages: make CPU caches see the DMA'd data. */
		struct scatterlist *sg = host->dma.sg;
		int i;

		for (i = 0; i < host->dma.num_ents; i++, sg++)
			flush_dcache_page(sg_page(sg));
	}

	host->dma.sg = NULL;
	host->dma.busy = 0;

	if ((host->curr.got_dataend && host->curr.got_datablkend) || mrq->data->error) {
		if (mrq->data->error &&
		    !(host->curr.got_dataend && host->curr.got_datablkend)) {
			pr_info("%s: Worked around bug 1535304\n",
				mmc_hostname(host->mmc));
		}
		/*
		 * If we've already gotten our DATAEND / DATABLKEND
		 * for this request, then complete it through here.
		 */
		msmsdcc_stop_data(host);

		if (!mrq->data->error)
			host->curr.data_xfered = host->curr.xfer_size;
		if (!mrq->data->stop || mrq->cmd->error) {
			host->curr.mrq = NULL;
			host->curr.cmd = NULL;
			mrq->data->bytes_xfered = host->curr.data_xfered;
			/* Drop the lock before calling back into the core. */
			spin_unlock_irqrestore(&host->lock, flags);
#ifdef CONFIG_MMC_MSM_PROG_DONE_SCAN
			if ((mrq->cmd->opcode == SD_IO_RW_EXTENDED) &&
			    (mrq->cmd->arg & 0x80000000)) {
				/* set the prog_scan in a cmd53.*/
				host->prog_scan = 1;
				/* Send STOP to let the SDCC know to stop. */
				writel(MCI_CSPM_MCIABORT,
				       host->base + MMCICOMMAND);
			}
#endif
			mmc_request_done(host->mmc, mrq);
			return;
		} else {
#if defined (CONFIG_MACH_ACER_A1)
			/* Board quirk: wait for DAT0 to deassert busy before
			 * issuing the STOP command. */
			while ((host->pdev_id == 1) &&
			       (mrq->data->flags & MMC_DATA_WRITE) &&
			       (gpio_get_value(SDCC1_DATA_0) == 0)) {
				udelay(5);
			}
#endif
			msmsdcc_start_command(host, mrq->data->stop, 0);
		}
	}
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return;
}