/* Bring the controller to a known state: soft-reset it and mask all
 * MMCIF interrupt sources.  Always succeeds. */
static int sh_mmcif_init(struct mmc *mmc)
{
	struct sh_mmcif_host *hp = mmc_priv(mmc);

	sh_mmcif_sync_reset(hp);
	sh_mmcif_write(MASK_ALL, &hp->regs->ce_int_mask);
	return 0;
}
/*
 * .set_ios host-ops callback: apply power mode, clock frequency, timing and
 * bus width requested by the MMC core.  Silently refused (dev_dbg only) when
 * another request is in flight, as guarded by host->lock / host->state.
 */
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		/* A transfer is still running - do not touch the ios settings */
		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->power_mode == MMC_POWER_UP) {
		if (!host->card_present) {
			/* See if we also get DMA */
			sh_mmcif_request_dma(host, host->pd->dev.platform_data);
			host->card_present = true;
		}
		sh_mmcif_set_power(host, ios);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* clock stop */
		sh_mmcif_clock_control(host, 0);
		if (ios->power_mode == MMC_POWER_OFF) {
			/* Full power-off also tears down the DMA channels */
			if (host->card_present) {
				sh_mmcif_release_dma(host);
				host->card_present = false;
			}
		}
		if (host->power) {
			/* Order matters: runtime-PM put first, then gate the clock */
			pm_runtime_put_sync(&host->pd->dev);
			clk_disable_unprepare(host->hclk);
			host->power = false;
			if (ios->power_mode == MMC_POWER_OFF)
				sh_mmcif_set_power(host, ios);
		}
		host->state = STATE_IDLE;
		return;
	}

	if (ios->clock) {
		if (!host->power) {
			/* First clocked access after power-down: re-enable the
			 * clock, resume the device and reset the controller */
			sh_mmcif_clk_update(host);
			pm_runtime_get_sync(&host->pd->dev);
			host->power = true;
			sh_mmcif_sync_reset(host);
		}
		sh_mmcif_clock_control(host, ios->clock);
	}

	/* Cache timing/bus width for the command setup path */
	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}
static int sh_mmcif_error_manage(struct sh_mmcif_host *host) { u32 state1, state2; int ret, timeout; host->sd_error = false; state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1); dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2); if (state1 & STS1_CMDSEQ) { sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); for (timeout = 10000000; timeout; timeout--) { if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) & STS1_CMDSEQ)) break; mdelay(1); } if (!timeout) { dev_err(&host->pd->dev, "Forced end of command sequence timeout err\n"); return -EIO; } sh_mmcif_sync_reset(host); dev_dbg(&host->pd->dev, "Forced end of command sequence\n"); return -EIO; } if (state2 & STS2_CRC_ERR) { dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n", host->state, host->wait_for); ret = -EIO; } else if (state2 & STS2_TIMEOUT_ERR) { dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n", host->state, host->wait_for); ret = -ETIMEDOUT; } else { dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n", host->state, host->wait_for); ret = -EIO; } return ret; }
static int sh_mmcif_error_manage(struct sh_mmcif_host *host) { u32 state1, state2; int ret, timeout = 10000000; host->sd_error = 0; host->wait_int = 0; state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); pr_debug("%s: ERR HOST_STS1 = %08x\n", DRIVER_NAME, state1); pr_debug("%s: ERR HOST_STS2 = %08x\n", DRIVER_NAME, state2); if (state1 & STS1_CMDSEQ) { sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); while (1) { timeout--; if (timeout < 0) { pr_err(DRIVER_NAME": Forceed end of " \ "command sequence timeout err\n"); return -EIO; } if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) & STS1_CMDSEQ)) break; mdelay(1); } sh_mmcif_sync_reset(host); pr_debug(DRIVER_NAME": Forced end of command sequence\n"); return -EIO; } if (state2 & STS2_CRC_ERR) { pr_debug(DRIVER_NAME": Happened CRC error\n"); ret = -EIO; } else if (state2 & STS2_TIMEOUT_ERR) { pr_debug(DRIVER_NAME": Happened Timeout error\n"); ret = -ETIMEDOUT; } else { pr_debug(DRIVER_NAME": Happened End/Index error\n"); ret = -EIO; } return ret; }
static int sh_mmcif_error_manage(struct sh_mmcif_host *host) { u32 state1, state2; int ret, timeout = 10000000; host->sd_error = 0; host->wait_int = 0; state1 = sh_mmcif_read(&host->regs->ce_host_sts1); state2 = sh_mmcif_read(&host->regs->ce_host_sts2); debug("%s: ERR HOST_STS1 = %08x\n", \ DRIVER_NAME, sh_mmcif_read(&host->regs->ce_host_sts1)); debug("%s: ERR HOST_STS2 = %08x\n", \ DRIVER_NAME, sh_mmcif_read(&host->regs->ce_host_sts2)); if (state1 & STS1_CMDSEQ) { debug("%s: Forced end of command sequence\n", DRIVER_NAME); sh_mmcif_bitset(CMD_CTRL_BREAK, &host->regs->ce_cmd_ctrl); sh_mmcif_bitset(~CMD_CTRL_BREAK, &host->regs->ce_cmd_ctrl); while (1) { timeout--; if (timeout < 0) { printf(DRIVER_NAME": Forceed end of " \ "command sequence timeout err\n"); return -EILSEQ; } if (!(sh_mmcif_read(&host->regs->ce_host_sts1) & STS1_CMDSEQ)) break; } sh_mmcif_sync_reset(host); return -EILSEQ; } if (state2 & STS2_CRC_ERR) ret = -EILSEQ; else if (state2 & STS2_TIMEOUT_ERR) ret = TIMEOUT; else ret = -EILSEQ; return ret; }
/*
 * Platform probe: map registers, acquire the interface clock, configure the
 * mmc_host capabilities/limits, hook up both IRQs and register the host.
 *
 * Fixes vs. original:
 *  - mmc_add_host() used to be called *before* request_irq(); a failing
 *    request_irq() then unwound through mmc_free_host() on a host that was
 *    still registered with the MMC core (use-after-free for the core).
 *    The IRQs are now requested first and the host registered last.
 *  - the clk_get() reference was never dropped on the error path; clean_up2
 *    now does clk_put() after clk_disable().
 */
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host = NULL;
	struct sh_mmcif_plat_data *pd = NULL;
	struct resource *res;
	void __iomem *reg;
	char clk_name[8];

	/* Both IRQs (error and normal) are mandatory for this variant */
	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0 || irq[1] < 0) {
		pr_err(DRIVER_NAME": Get irq error\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		return -ENXIO;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		dev_err(&pdev->dev, "ioremap error.\n");
		return -ENOMEM;
	}
	pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data);
	if (!pd) {
		dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
		ret = -ENXIO;
		goto clean_up;
	}

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto clean_up;
	}
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->addr = reg;
	host->timeout = 1000;

	snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
	host->hclk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(host->hclk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		ret = PTR_ERR(host->hclk);
		goto clean_up1;
	}
	clk_enable(host->hclk);
	host->clk = clk_get_rate(host->hclk);
	host->pd = pdev;

	init_waitqueue_head(&host->intr_wait);

	mmc->ops = &sh_mmcif_ops;
	mmc->f_max = host->clk;
	/* close to 400KHz */
	if (mmc->f_max < 51200000)
		mmc->f_min = mmc->f_max / 128;
	else if (mmc->f_max < 102400000)
		mmc->f_min = mmc->f_max / 256;
	else
		mmc->f_min = mmc->f_max / 512;
	if (pd->ocr)
		mmc->ocr_avail = pd->ocr;
	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
	if (pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_phys_segs = 128;
	mmc->max_hw_segs = 128;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = 65535;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	sh_mmcif_sync_reset(host);
	platform_set_drvdata(pdev, host);

	/* Request both IRQs before registering the host with the MMC core,
	 * so a failure here never has to undo mmc_add_host() */
	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
	if (ret) {
		pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n");
		goto clean_up2;
	}
	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
	if (ret) {
		free_irq(irq[0], host);
		pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n");
		goto clean_up2;
	}

	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	mmc_add_host(mmc);
	sh_mmcif_detect(host->mmc);

	pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION);
	pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME,
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
	return ret;

clean_up2:
	clk_disable(host->hclk);
	clk_put(host->hclk);	/* drop the clk_get() reference (was leaked) */
clean_up1:
	mmc_free_host(mmc);
clean_up:
	if (reg)
		iounmap(reg);
	return ret;
}
/*
 * Platform probe (newer variant): map registers, parse DT properties, set up
 * host capabilities/limits, enable runtime PM and the interface clock,
 * install the threaded IRQ handlers (IRQ 1 is optional) and register the
 * host with the MMC core.  Unwinds via the e* label chain on failure.
 */
static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
	struct resource *res;
	void __iomem *reg;
	const char *name;

	/* irq[0] (error) is mandatory; irq[1] may be absent on single-IRQ SoCs */
	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		return -ENXIO;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		dev_err(&pdev->dev, "ioremap error.\n");
		return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto ealloch;
	}
	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto eofparse;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->addr = reg;
	host->timeout = msecs_to_jiffies(10000);
	/* Optional IP features keyed off platform data; no pd => assume present */
	host->ccs_enable = !pd || !pd->ccs_unsupported;
	host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	host->hclk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->hclk)) {
		ret = PTR_ERR(host->hclk);
		dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
		goto eclkget;
	}
	ret = sh_mmcif_clk_update(host);
	if (ret < 0)
		goto eclkupdate;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto eresume;

	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/* With a single combined IRQ, label it with the device name instead */
	name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt,
				   0, name, host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (%s)\n", name);
		goto ereqirq0;
	}
	if (irq[1] >= 0) {
		ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt,
					   0, "sh_mmc:int", host);
		if (ret) {
			dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
			goto ereqirq1;
		}
	}
	if (pd && pd->use_cd_gpio) {
		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
		if (ret < 0)
			goto erqcd;
	}

	mutex_init(&host->thread_lock);

	/* Clock is re-enabled on demand via runtime PM / set_ios */
	clk_disable_unprepare(host->hclk);
	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto emmcaddh;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
	return ret;

	/* Error unwind: each label undoes one acquisition step above.
	 * NOTE(review): reaching emmcaddh/erqcd runs clk_disable_unprepare()
	 * at eresume a second time (it already ran before mmc_add_host) -
	 * looks unbalanced; confirm against clk core refcounting. */
emmcaddh:
erqcd:
	if (irq[1] >= 0)
		free_irq(irq[1], host);
ereqirq1:
	free_irq(irq[0], host);
ereqirq0:
	pm_runtime_suspend(&pdev->dev);
eresume:
	clk_disable_unprepare(host->hclk);
eclkupdate:
	clk_put(host->hclk);
eclkget:
	pm_runtime_disable(&pdev->dev);
eofparse:
	mmc_free_host(mmc);
ealloch:
	iounmap(reg);
	return ret;
}