struct sdhci_host *sdhci_alloc_host(struct vmm_device *dev, int extra)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + extra, dev);
	if (!mmc) {
		return NULL;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = dev;

	return host;
}
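/*
 * Usage note (editor's sketch, not from the original sources): the wrapper
 * above only allocates the mmc_host and links in the private sdhci_host.
 * A caller would typically follow the alloc/fill/add pattern shown below.
 * The example_sdhci_probe() name and the sdhci_add_host() registration
 * call are assumptions for illustration only, not part of the snippet
 * above.
 */
static int example_sdhci_probe(struct vmm_device *dev)
{
	struct sdhci_host *host;
	int ret;

	/* no extra driver-private data in this sketch, hence 0 */
	host = sdhci_alloc_host(dev, 0);
	if (!host)
		return -ENOMEM;

	/* ... set host->ioaddr, host->ops, quirks, ... */

	ret = sdhci_add_host(host);	/* assumed registration helper */
	if (ret) {
		mmc_free_host(host->mmc);
		return ret;
	}

	return 0;
}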
static int modem_detect_host(const char *host_id)
{
	/* HACK!!!
	 * Rely on mmc->class_dev.class set in mmc_alloc_host.
	 * Tricky part: a new mmc host is temporarily created to discover
	 * the mmc_host class.  Is there a more elegant way to enumerate
	 * mmc_hosts?
	 */
	struct mmc_host *mmc = NULL;
	struct class_dev_iter iter;
	struct device *dev;
	int ret = -1;

	printk("[C2K] before alloc host\n");
	mmc = mmc_alloc_host(0, NULL);
	if (!mmc) {
		printk("[C2K] mmc_alloc_host error\n");
		ret = -ENOMEM;
		goto out;
	}
	printk("[C2K] mmc_alloc_host success\n");

	BUG_ON(!mmc->class_dev.class);
	class_dev_iter_init(&iter, mmc->class_dev.class, NULL, NULL);
	for (;;) {
		dev = class_dev_iter_next(&iter);
		if (!dev) {
			printk("[C2K] class dev iter next failed\n");
			LOGPRT(LOG_ERR, "%s: %d\n", __func__, __LINE__);
			break;
		} else {
			struct mmc_host *host = container_of(dev,
					struct mmc_host, class_dev);

			if (dev_name(&host->class_dev) &&
			    strcmp(dev_name(&host->class_dev), host_id))
				continue;
			ret = 0;
			break;
		}
	}
	mmc_free_host(mmc);
out:
	return ret;
}
static int modem_detect_card(struct cbp_reset *cbp_rst_ind)
{
	/* HACK!!!
	 * Rely on mmc->class_dev.class set in mmc_alloc_host.
	 * Tricky part: a new mmc host is temporarily created to discover
	 * the mmc_host class.  Is there a more elegant way to enumerate
	 * mmc_hosts?
	 */
	struct mmc_host *mmc = NULL;
	struct class_dev_iter iter;
	struct device *dev;
	int ret = -1;

	mmc = mmc_alloc_host(0, NULL);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}

	BUG_ON(!mmc->class_dev.class);
	class_dev_iter_init(&iter, mmc->class_dev.class, NULL, NULL);
	for (;;) {
		dev = class_dev_iter_next(&iter);
		if (!dev) {
			break;
		} else {
			struct mmc_host *host = container_of(dev,
					struct mmc_host, class_dev);

			if (dev_name(&host->class_dev) &&
			    strcmp(dev_name(&host->class_dev), MDM_MMC_ID)) {
				printk("[MODEM SDIO] detect card not match\n");
				continue;
			}
			printk("[MODEM SDIO] detect card matched\n");
			cbp_rst_ind->host = host;
			mmc_detect_change(host, 0);
			ret = 0;
			break;
		}
	}
	mmc_free_host(mmc);
out:
	return ret;
}
static int cw1200_detect_card(const struct cw1200_platform_data *pdata)
{
	/* HACK!!!
	 * Rely on mmc->class_dev.class set in mmc_alloc_host.
	 * Tricky part: a new mmc host is temporarily created to discover
	 * the mmc_host class.  Is there a more elegant way to enumerate
	 * mmc_hosts?
	 */
	struct mmc_host *mmc = NULL;
	struct class_dev_iter iter;
	struct device *dev;

	mmc = mmc_alloc_host(0, NULL);
	if (!mmc)
		return -ENOMEM;

	BUG_ON(!mmc->class_dev.class);
	class_dev_iter_init(&iter, mmc->class_dev.class, NULL, NULL);
	for (;;) {
		dev = class_dev_iter_next(&iter);
		if (!dev) {
			printk(KERN_ERR "cw1200: %s is not found.\n",
			       pdata->mmc_id);
			break;
		} else {
			struct mmc_host *host = container_of(dev,
					struct mmc_host, class_dev);

			if (dev_name(&host->class_dev) &&
			    strcmp(dev_name(&host->class_dev), pdata->mmc_id))
				continue;
			mmc_detect_change(host, 10);
			break;
		}
	}
	mmc_free_host(mmc);
	return 0;
}
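/*
 * Editor's note (not from the original sources): the three detect helpers
 * above initialise a class_dev_iter but never release it.  The driver core
 * provides class_dev_iter_exit() for that.  A minimal sketch of the
 * enumerate-and-release pattern, reusing the names from the cw1200 snippet
 * above, would be:
 *
 *	class_dev_iter_init(&iter, mmc->class_dev.class, NULL, NULL);
 *	while ((dev = class_dev_iter_next(&iter)) != NULL) {
 *		struct mmc_host *host = container_of(dev, struct mmc_host,
 *						     class_dev);
 *		if (!strcmp(dev_name(&host->class_dev), pdata->mmc_id)) {
 *			mmc_detect_change(host, 10);
 *			break;
 *		}
 *	}
 *	class_dev_iter_exit(&iter);
 *	mmc_free_host(mmc);
 */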
static int __devinit sh_mmcif_probe(struct platform_device *pdev) { int ret = 0, irq[2]; struct mmc_host *mmc; struct sh_mmcif_host *host = NULL; struct sh_mmcif_plat_data *pd = NULL; struct resource *res; void __iomem *reg; char clk_name[8]; irq[0] = platform_get_irq(pdev, 0); irq[1] = platform_get_irq(pdev, 1); if (irq[0] < 0 || irq[1] < 0) { pr_err(DRIVER_NAME": Get irq error\n"); return -ENXIO; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "platform_get_resource error.\n"); return -ENXIO; } reg = ioremap(res->start, resource_size(res)); if (!reg) { dev_err(&pdev->dev, "ioremap error.\n"); return -ENOMEM; } pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data); if (!pd) { dev_err(&pdev->dev, "sh_mmcif plat data error.\n"); ret = -ENXIO; goto clean_up; } mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto clean_up; } host = mmc_priv(mmc); host->mmc = mmc; host->addr = reg; host->timeout = 1000; snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id); host->hclk = clk_get(&pdev->dev, clk_name); if (IS_ERR(host->hclk)) { dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); ret = PTR_ERR(host->hclk); goto clean_up1; } clk_enable(host->hclk); host->clk = clk_get_rate(host->hclk); host->pd = pdev; init_waitqueue_head(&host->intr_wait); mmc->ops = &sh_mmcif_ops; mmc->f_max = host->clk; /* close to 400KHz */ if (mmc->f_max < 51200000) mmc->f_min = mmc->f_max / 128; else if (mmc->f_max < 102400000) mmc->f_min = mmc->f_max / 256; else mmc->f_min = mmc->f_max / 512; if (pd->ocr) mmc->ocr_avail = pd->ocr; mmc->caps = MMC_CAP_MMC_HIGHSPEED; if (pd->caps) mmc->caps |= pd->caps; mmc->max_phys_segs = 128; mmc->max_hw_segs = 128; mmc->max_blk_size = 512; mmc->max_blk_count = 65535; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; sh_mmcif_sync_reset(host); platform_set_drvdata(pdev, host); mmc_add_host(mmc); ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); if (ret) { pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n"); goto clean_up2; } ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host); if (ret) { free_irq(irq[0], host); pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n"); goto clean_up2; } sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); sh_mmcif_detect(host->mmc); pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION); pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME, sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); return ret; clean_up2: clk_disable(host->hclk); clean_up1: mmc_free_host(mmc); clean_up: if (reg) iounmap(reg); return ret; }
/* * ubicom32sd_probe */ static int __devinit ubicom32sd_probe(struct platform_device *pdev) { struct ubicom32sd_platform_data *pdata = (struct ubicom32sd_platform_data *)pdev->dev.platform_data; struct mmc_host *mmc; struct ubicom32sd_data *ud; struct resource *res_regs; struct resource *res_irq_tx; struct resource *res_irq_rx; int ret; /* * Get our resources, regs is the hardware driver base address * and the tx and rx irqs are used to communicate with the * hardware driver. */ res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 0); res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); if (!res_regs || !res_irq_tx || !res_irq_rx) { ret = -EINVAL; goto fail; } /* * Reserve any gpios we need */ if (pdata->cards[0].pin_wp != -1) { ret = gpio_request(pdata->cards[0].pin_wp, "sd-wp"); if (ret) { goto fail; } gpio_direction_input(pdata->cards[0].pin_wp); } ret = gpio_request(pdata->cards[0].pin_cd, "sd-cd"); if (ret) { goto fail_cd; } gpio_direction_input(pdata->cards[0].pin_cd); /* * HACK: for the dual port controller on port F, we don't support the second port right now */ if (pdata->ncards > 1) { ret = gpio_request(pdata->cards[1].pin_pwr, "sd-pwr"); gpio_direction_output(pdata->cards[1].pin_pwr, !pdata->cards[1].pwr_polarity); gpio_direction_output(pdata->cards[1].pin_pwr, pdata->cards[1].pwr_polarity); } ret = gpio_request(pdata->cards[0].pin_pwr, "sd-pwr"); if (ret) { goto fail_pwr; } gpio_direction_output(pdata->cards[0].pin_pwr, !pdata->cards[0].pwr_polarity); /* * Allocate the MMC driver, it includes memory for our data. */ mmc = mmc_alloc_host(sizeof(struct ubicom32sd_data), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto fail_mmc; } ud = (struct ubicom32sd_data *)mmc_priv(mmc); ud->mmc = mmc; ud->pdata = pdata; ud->regs = (struct sdtio_vp_regs *)res_regs->start; ud->irq_tx = res_irq_tx->start; ud->irq_rx = res_irq_rx->start; platform_set_drvdata(pdev, mmc); ret = request_irq(ud->irq_rx, ubicom32sd_interrupt, IRQF_DISABLED, mmc_hostname(mmc), mmc); if (ret) { goto fail_mmc; } /* * Fill in the mmc structure */ mmc->ops = &ubicom32sd_ops; mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL | MMC_CAP_SDIO_IRQ | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; mmc->f_min = ud->regs->f_min; mmc->f_max = ud->regs->f_max; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; /* * Setup some restrictions on transfers * * We allow up to SDTIO_MAX_SG_BLOCKS of data to DMA into, there are * not really any "max_seg_size", "max_req_size", or "max_blk_count" * restrictions (must be less than U32_MAX though), pick * something large?!... * * The hardware can do up to 4095 bytes per block, since the spec * only requires 2048, we'll set it to that and not worry about * potential weird blk lengths. */ mmc->max_hw_segs = SDTIO_MAX_SG_BLOCKS; mmc->max_phys_segs = SDTIO_MAX_SG_BLOCKS; mmc->max_seg_size = 1024 * 1024; mmc->max_req_size = 1024 * 1024; mmc->max_blk_count = 1024; mmc->max_blk_size = 2048; ubicom32sd_reset(ud); /* * enable interrupts */ ud->int_en = 0; ubicom32sd_send_command_sync(ud, SDTIO_COMMAND_SETUP_SDIO << SDTIO_COMMAND_SHIFT | SDTIO_COMMAND_FLAG_SDIO_INT_EN, 0); mmc_add_host(mmc); printk(KERN_INFO "%s at %p, irq %d/%d f=%u-%u\n", mmc_hostname(mmc), ud->regs, ud->irq_tx, ud->irq_rx, mmc->f_min, mmc->f_max); return 0; fail_mmc: gpio_free(pdata->cards[0].pin_pwr); fail_pwr: gpio_free(pdata->cards[0].pin_cd); fail_cd: if (pdata->cards[0].pin_wp != -1) { gpio_free(pdata->cards[0].pin_wp); } fail: return ret; }
static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
{
	struct mmc_omap_slot *slot = NULL;
	struct mmc_host *mmc;
	int r;

	mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev);
	if (mmc == NULL)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->host = host;
	slot->mmc = mmc;
	slot->id = id;
	slot->pdata = &host->pdata->slots[id];
	host->slots[id] = slot;

	mmc->caps = 0;
	if (host->pdata->slots[id].wires >= 4)
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	mmc->ops = &mmc_omap_ops;
	mmc->f_min = 400000;

	if (cpu_class_is_omap2())
		mmc->f_max = 48000000;
	else
		mmc->f_max = 24000000;
	if (host->pdata->max_freq)
		mmc->f_max = min(host->pdata->max_freq, mmc->f_max);
	mmc->ocr_avail = slot->pdata->ocr_mask;

	/* Use scatterlist DMA to reduce per-transfer costs.
	 * NOTE max_seg_size assumption that small blocks aren't
	 * normally used (except e.g. for reading SD registers).
	 */
	mmc->max_phys_segs = 32;
	mmc->max_hw_segs = 32;
	mmc->max_blk_size = 2048;	/* BLEN is 11 bits (+1) */
	mmc->max_blk_count = 2048;	/* NBLK is 11 bits (+1) */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	r = mmc_add_host(mmc);
	if (r < 0)
		goto err_remove_host;

	if (slot->pdata->name != NULL) {
		r = device_create_file(&mmc->class_dev,
					&dev_attr_slot_name);
		if (r < 0)
			goto err_remove_host;
	}

	if (slot->pdata->get_cover_state != NULL) {
		r = device_create_file(&mmc->class_dev,
					&dev_attr_cover_switch);
		if (r < 0)
			goto err_remove_slot_name;

		setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
			    (unsigned long)slot);
		tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
			     (unsigned long)slot);
		tasklet_schedule(&slot->cover_tasklet);
	}

	return 0;

err_remove_slot_name:
	if (slot->pdata->name != NULL)
		device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
err_remove_host:
	mmc_remove_host(mmc);
	mmc_free_host(mmc);
	return r;
}
static int bcm2835_sdhost_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct clk *clk; struct resource *iomem; struct bcm2835_host *host; struct mmc_host *mmc; int ret; pr_debug("bcm2835_sdhost_probe\n"); mmc = mmc_alloc_host(sizeof(*host), dev); if (!mmc) return -ENOMEM; mmc->ops = &bcm2835_sdhost_ops; host = mmc_priv(mmc); host->mmc = mmc; host->pio_timeout = msecs_to_jiffies(500); host->max_delay = 1; /* Warn if over 1ms */ spin_lock_init(&host->lock); iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); host->ioaddr = devm_ioremap_resource(dev, iomem); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err; } host->phys_addr = iomem->start + BCM2835_VCMMU_SHIFT; pr_debug(" - ioaddr %lx, iomem->start %lx, phys_addr %lx\n", (unsigned long)host->ioaddr, (unsigned long)iomem->start, (unsigned long)host->phys_addr); host->allow_dma = ALLOW_DMA; if (node) { /* Read any custom properties */ of_property_read_u32(node, "brcm,delay-after-stop", &host->delay_after_stop); of_property_read_u32(node, "brcm,overclock-50", &host->overclock_50); of_property_read_u32(node, "brcm,pio-limit", &host->pio_limit); host->allow_dma = ALLOW_DMA && !of_property_read_bool(node, "brcm,force-pio"); host->debug = of_property_read_bool(node, "brcm,debug"); } if (host->allow_dma) { if (node) { host->dma_chan_tx = dma_request_slave_channel(dev, "tx"); host->dma_chan_rx = dma_request_slave_channel(dev, "rx"); } else { dma_cap_mask_t mask; dma_cap_zero(mask); /* we don't care about the channel, any would work */ dma_cap_set(DMA_SLAVE, mask); host->dma_chan_tx = dma_request_channel(mask, NULL, NULL); host->dma_chan_rx = dma_request_channel(mask, NULL, NULL); } } clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { dev_err(dev, "could not get clk\n"); ret = PTR_ERR(clk); goto err; } host->max_clk = clk_get_rate(clk); host->irq = platform_get_irq(pdev, 0); if (host->irq <= 0) { dev_err(dev, "get IRQ failed\n"); ret = -EINVAL; goto err; } pr_debug(" - max_clk %lx, irq %d\n", (unsigned long)host->max_clk, (int)host->irq); if (node) mmc_of_parse(mmc); else mmc->caps |= MMC_CAP_4_BIT_DATA; ret = bcm2835_sdhost_add_host(host); if (ret) goto err; platform_set_drvdata(pdev, host); pr_debug("bcm2835_sdhost_probe -> OK\n"); return 0; err: pr_debug("bcm2835_sdhost_probe -> err %d\n", ret); mmc_free_host(mmc); return ret; }
static int imxmci_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct imxmci_host *host = NULL; struct resource *r; int ret = 0, irq; printk(KERN_INFO "i.MX mmc driver\n"); r = platform_device_resource(pdev, IORESOURCE_MEM, 0); irq = platform_device_irq(pdev, 0); if (!r || irq == NO_IRQ) return -ENXIO; r = request_mem_region(r->start, 0x100, "IMXMCI"); if (!r) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out; } mmc->ops = &imxmci_ops; mmc->f_min = 150000; mmc->f_max = CLK_RATE/2; mmc->ocr_avail = MMC_VDD_32_33; mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_BYTEBLOCK; /* MMC core transfer sizes tunable parameters */ mmc->max_hw_segs = 64; mmc->max_phys_segs = 64; mmc->max_sectors = 64; /* default 1 << (PAGE_CACHE_SHIFT - 9) */ mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */ host = mmc_priv(mmc); host->mmc = mmc; host->dma_allocated = 0; host->pdata = pdev->dev.platform_data; spin_lock_init(&host->lock); host->res = r; host->irq = irq; imx_gpio_mode(PB8_PF_SD_DAT0); imx_gpio_mode(PB9_PF_SD_DAT1); imx_gpio_mode(PB10_PF_SD_DAT2); /* Configured as GPIO with pull-up to ensure right MCC card mode */ /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */ imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); /* imx_gpio_mode(PB11_PF_SD_DAT3); */ imx_gpio_mode(PB12_PF_SD_CLK); imx_gpio_mode(PB13_PF_SD_CMD); imxmci_softreset(); if ( MMC_REV_NO != 0x390 ) { dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n", MMC_REV_NO); goto out; } MMC_READ_TO = 0x2db4; /* recommended in data sheet */ host->imask = IMXMCI_INT_MASK_DEFAULT; MMC_INT_MASK = host->imask; if(imx_dma_request_by_prio(&host->dma, DRIVER_NAME, DMA_PRIO_LOW)<0){ dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n"); ret = -EBUSY; goto out; } host->dma_allocated=1; imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host); tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host); host->status_reg=0; host->pending_events=0; ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host); if (ret) goto out; host->present = host->pdata->card_present(); init_timer(&host->timer); host->timer.data = (unsigned long)host; host->timer.function = imxmci_check_status; add_timer(&host->timer); mod_timer(&host->timer, jiffies + (HZ>>1)); platform_set_drvdata(pdev, mmc); mmc_add_host(mmc); return 0; out: if (host) { if(host->dma_allocated){ imx_dma_free(host->dma); host->dma_allocated=0; } } if (mmc) mmc_free_host(mmc); release_resource(r); return ret; }
int msmsdcc_probe(struct platform_device *pdev) { struct mmc_platform_data *plat = pdev->dev.platform_data; struct msmsdcc_host *host; struct mmc_host *mmc; struct resource *irqres = NULL; struct resource *memres = NULL; struct resource *dmares = NULL; int ret; /* must have platform data */ if (!plat) { printk(KERN_ERR "%s: Platform data not available\n", __func__); ret = -EINVAL; goto out; } if (pdev->id < 1 || pdev->id > 4) return -EINVAL; if (pdev->resource == NULL || pdev->num_resources < 2) { printk(KERN_ERR "%s: Invalid resource\n", __func__); return -ENXIO; } memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irqres || !memres) { printk(KERN_ERR "%s: Invalid resource\n", __func__); return -ENXIO; } /* * Setup our host structure */ mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out; } host = mmc_priv(mmc); host->pdev_id = pdev->id; host->plat = plat; host->mmc = mmc; host->cmdpoll = 1; host->base = ioremap(memres->start, PAGE_SIZE); if (!host->base) { ret = -ENOMEM; goto out; } host->irqres = irqres; host->memres = memres; host->dmares = dmares; spin_lock_init(&host->lock); #ifdef CONFIG_MMC_EMBEDDED_SDIO if (plat->embedded_sdio) mmc_set_embedded_sdio_data(mmc, &plat->embedded_sdio->cis, &plat->embedded_sdio->cccr, plat->embedded_sdio->funcs, plat->embedded_sdio->num_funcs); #endif #ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ INIT_WORK(&host->resume_task, do_resume_work); #endif /* * Setup DMA */ msmsdcc_init_dma(host); /* * Setup main peripheral bus clock */ host->pclk = clk_get(&pdev->dev, "sdc_pclk"); if (IS_ERR(host->pclk)) { ret = PTR_ERR(host->pclk); printk(KERN_ERR "%s: failed to get pclock (%d)\n", __func__, ret); goto host_free; } ret = clk_enable(host->pclk); if (ret) goto pclk_put; host->pclk_rate = clk_get_rate(host->pclk); /* * Setup SDC MMC clock */ host->clk = clk_get(&pdev->dev, "sdc_clk"); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); printk(KERN_ERR "%s: failed to get clock (%d)\n", __func__, ret); goto pclk_disable; } ret = clk_enable(host->clk); if (ret) goto clk_put; ret = clk_set_rate(host->clk, msmsdcc_fmin); if (ret) { printk(KERN_ERR "%s: Clock rate set failed (%d)\n", __func__, ret); goto clk_disable; } host->clk_rate = clk_get_rate(host->clk); host->clks_on = 1; /* * Setup MMC host structure */ mmc->ops = &msmsdcc_ops; mmc->f_min = msmsdcc_fmin; mmc->f_max = msmsdcc_fmax; mmc->ocr_avail = plat->ocr_mask; if (msmsdcc_4bit) mmc->caps |= MMC_CAP_4_BIT_DATA; if (msmsdcc_sdioirq) mmc->caps |= MMC_CAP_SDIO_IRQ; mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; mmc->max_phys_segs = NR_SG; mmc->max_hw_segs = NR_SG; mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */ mmc->max_blk_count = 65536; mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */ mmc->max_seg_size = mmc->max_req_size; writel(0, host->base + MMCIMASK0); writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */ writel(MCI_IRQENABLE, host->base + MMCIMASK0); host->saved_irq0mask = MCI_IRQENABLE; /* * Setup card detect change */ memset(&host->timer, 0, sizeof(host->timer)); if (plat->register_status_notify) { plat->register_status_notify(msmsdcc_status_notify_cb, host); } else if (!plat->status) printk(KERN_ERR "%s: No card detect facilities available\n", mmc_hostname(mmc)); else { init_timer(&host->timer); host->timer.data = (unsigned long)host; host->timer.function = msmsdcc_check_status; 
host->timer.expires = jiffies + HZ; add_timer(&host->timer); } if (plat->status) { host->oldstat = host->plat->status(mmc_dev(host->mmc)); host->eject = !host->oldstat; } /* * Setup a command timer. We currently need this due to * some 'strange' timeout / error handling situations. */ init_timer(&host->command_timer); host->command_timer.data = (unsigned long) host; host->command_timer.function = msmsdcc_command_expired; ret = request_irq(irqres->start, msmsdcc_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); if (ret) goto stat_irq_free; ret = request_irq(irqres->end, msmsdcc_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host); if (ret) goto cmd_irq_free; mmc_set_drvdata(pdev, mmc); mmc_claim_host(mmc); printk(KERN_INFO "%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n", mmc_hostname(mmc), (unsigned long long)memres->start, (unsigned int) irqres->start, (unsigned int) host->stat_irq, host->dma.channel); printk(KERN_INFO "%s: 4 bit data mode %s\n", mmc_hostname(mmc), (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled")); printk(KERN_INFO "%s: MMC clock %u -> %u Hz, PCLK %u Hz\n", mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate); printk(KERN_INFO "%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject); printk(KERN_INFO "%s: Power save feature enable = %d\n", mmc_hostname(mmc), msmsdcc_pwrsave); if (host->dma.channel != -1) { printk(KERN_INFO "%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n", mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr); printk(KERN_INFO "%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n", mmc_hostname(mmc), host->dma.cmd_busaddr, host->dma.cmdptr_busaddr); } else printk(KERN_INFO "%s: PIO transfer enabled\n", mmc_hostname(mmc)); if (host->timer.function) printk(KERN_INFO "%s: Polling status mode enabled\n", mmc_hostname(mmc)); #if defined(CONFIG_DEBUG_FS) msmsdcc_dbg_createhost(host); #endif return 0; cmd_irq_free: free_irq(irqres->start, host); stat_irq_free: if (host->stat_irq) free_irq(host->stat_irq, host); clk_disable: clk_disable(host->clk); clk_put: clk_put(host->clk); pclk_disable: clk_disable(host->pclk); pclk_put: clk_put(host->pclk); host_free: mmc_free_host(mmc); out: return ret; }
static int goldfish_mmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct goldfish_mmc_host *host = NULL;
	struct resource *res;
	int ret = 0;
	int irq;
	dma_addr_t buf_addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
	if (mmc == NULL) {
		ret = -ENOMEM;
		goto err_alloc_host_failed;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

#if defined(CONFIG_ARM)
	host->reg_base = (void __iomem *)IO_ADDRESS(res->start - IO_START);
	host->virt_base = dma_alloc_writecombine(&pdev->dev, BUFFER_SIZE,
						 &buf_addr, GFP_KERNEL);
#elif defined(CONFIG_X86) || defined(CONFIG_MIPS)
	/*
	 * Use NULL for dev for ISA-like devices
	 */
	host->reg_base = ioremap(res->start, res->end - res->start + 1);
	host->virt_base = dma_alloc_coherent(NULL, BUFFER_SIZE,
					     &buf_addr, GFP_KERNEL);
#else
#error NOT SUPPORTED
#endif
	if (host->virt_base == NULL) {
		ret = -EBUSY;
		goto dma_alloc_failed;
	}
	host->phys_base = buf_addr;

	host->id = pdev->id;
	host->irq = irq;

	mmc->ops = &goldfish_mmc_ops;
	mmc->f_min = 400000;
	mmc->f_max = 24000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA;

	/* Use scatterlist DMA to reduce per-transfer costs.
	 * NOTE max_seg_size assumption that small blocks aren't
	 * normally used (except e.g. for reading SD registers).
	 */
	mmc->max_segs = 32;
	mmc->max_blk_size = 2048;	/* MMC_BLOCK_LENGTH is 11 bits (+1) */
	mmc->max_blk_count = 2048;	/* MMC_BLOCK_COUNT is 11 bits (+1) */
	mmc->max_req_size = BUFFER_SIZE;
	mmc->max_seg_size = mmc->max_req_size;

	ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto err_request_irq_failed;

	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
	if (ret)
		dev_warn(mmc_dev(host->mmc),
			 "Unable to create sysfs attributes\n");

	GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
	GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
			   MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
			   MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);

	mmc_add_host(mmc);
	return 0;

err_request_irq_failed:
#if defined(CONFIG_ARM)
	dma_free_writecombine(&pdev->dev, BUFFER_SIZE, host->virt_base,
			      host->phys_base);
#elif defined(CONFIG_X86) || defined(CONFIG_MIPS)
	dma_free_coherent(NULL, BUFFER_SIZE, host->virt_base,
			  host->phys_base);
#else
#error NOT SUPPORTED
#endif
dma_alloc_failed:
	mmc_free_host(host->mmc);
err_alloc_host_failed:
	return ret;
}
static int ocsdc_probe(struct platform_device *pdev)
{
	int ret;
	struct mmc_host *mmc;
	struct ocsdc_dev *dev;
	struct resource *res;

	mmc = mmc_alloc_host(sizeof(struct ocsdc_dev), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	dev = mmc_priv(mmc);
	dev->clk_freq = 50000000;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
		ret = -ENXIO;
		goto ERROR;
	}

	dev->iobase = devm_request_and_ioremap(&pdev->dev, res);
	if (!dev->iobase) {
		dev_err(&pdev->dev, "cannot request I/O memory space\n");
		ret = -ENXIO;
		goto ERROR;
	}

	dev->irq_cmd = platform_get_irq(pdev, 0);
	dev->irq_data = platform_get_irq(pdev, 1);
	if (dev->irq_cmd < 0 || dev->irq_data < 0) {
		ret = -ENXIO;
		goto ERROR;
	}

	ocsdc_init(dev);

	ret = devm_request_irq(&pdev->dev, dev->irq_cmd,
			       (irq_handler_t)ocsdc_irq_cmd, 0,
			       mmc_hostname(mmc), mmc);
	if (ret)
		goto ERROR;

	ret = devm_request_irq(&pdev->dev, dev->irq_data,
			       (irq_handler_t)ocsdc_irq_data, 0,
			       mmc_hostname(mmc), mmc);
	if (ret)
		goto ERROR;

	mmc->ops = &ocsdc_ops;
	mmc->f_min = dev->clk_freq / 6;
	mmc->f_max = dev->clk_freq / 2;
	mmc->caps = MMC_CAP_4_BIT_DATA;
	mmc->caps2 = 0;
	mmc->max_segs = 1;
	mmc->max_blk_size = 1 << 12;
	mmc->max_blk_count = 1 << 16;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto ERROR;

	platform_set_drvdata(pdev, mmc);

	return 0;

ERROR:
	mmc_free_host(mmc);
	return ret;
}
static int sslsd_probe(struct platform_device *dev) { struct mmc_host *mmc; sslsd_host *host; int ret; int irq; struct resource *r; sd_clk = sd_clk * 1000000; #if IO_MAP == 1 r = platform_get_resource(dev, IORESOURCE_MEM, 0); irq = platform_get_resource(dev, IORESOURCE_IRQ, 0)->start; #else r = _rc; irq = _rc[1].start; #endif if (!r || irq == NO_IRQ) { printk("sslsd: probe err - resource\n"); return -ENXIO; } mmc = mmc_alloc_host(sizeof(sslsd_host), &dev->dev); if (!mmc) { printk("sslsd: probe err - alloc host\n"); return -ENOMEM; } host = mmc_priv(mmc); #if IO_MAP == 1 // r = request_mem_region(r->start, 0x100, DRV_NAME); host->hw.r = (uint32_t)ioremap_nocache(r->start, r->end - r->start + 1); if (!host->hw.r) { printk("sslsd: probe err - ioremap\n"); ret = -EBUSY; goto l_region; } #else host->hw.r = r->start; #endif host->hw.r += 0x100; if (!clk) { printk("sslsd: probe info - clk not passed it assuming 25MHz\n"); clk = 25000000; } if (!tout) { tout = clk; } host->hw.evt = sslsd_evt; host->hw.baseclk = clk / 100000; host->hw.toutclk = tout / 1000; host->hw.ctx = host; ret = sdhc_init(&host->hw); if (ret) { printk("sslsd: probe err - sdhc_init\n"); ret = -ENODEV; goto l_host; } #if SD_DMA if (!host->hw.fdma) { printk("sslsd: probe warn - no dma support\n"); } else { #if IO_MAP == 3 ebm_mem = ebm_malloc(0x1000); #endif } #endif mmc->ops = &sslsd_ops; mmc->f_min = 100000; // mmc->f_max = host->hw.fhspeed ? 50000000 : 25000000; mmc->f_max = host->hw.fhspeed ? 50000000 : sd_clk; mmc->ocr_avail = host->hw.ocr; mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_SDIO_IRQ; if (host->hw.fhspeed) { mmc->caps |= MMC_CAP_SD_HIGHSPEED; } #ifdef SD_1BIT mmc->caps &= ~MMC_CAP_4_BIT_DATA; #endif mmc->max_hw_segs = 64; mmc->max_phys_segs = 64; mmc->max_seg_size = mmc->max_req_size = 512 * 1024; mmc->max_blk_size = 1 << (9 + host->hw.maxblksz); mmc->max_blk_count = 65535; //mmc->max_hw_segs = mmc->max_phys_segs = 1; //printk("sslsd: segs=%d blks=%d blksz=%d caps=%X\n", //mmc->max_hw_segs, mmc->max_blk_count, mmc->max_blk_size, mmc->caps); host->mmc = mmc; host->res = r; host->irq = irq; sdhc_irq = irq; //printk("sslsd: probe info - req_irq %d\n", irq); ret = request_irq(irq, sslsd_irq, IRQF_DISABLED, DRV_NAME, host); if (ret) { printk("sslsd: probe err - req_irq %d err %d\n", irq, ret); goto l_init; } /* tout timer */ init_timer(&host->timer); host->timer.data = (unsigned long)host; host->timer.function = sslsd_tout; platform_set_drvdata(dev, mmc); mmc_add_host(mmc); /* build sysfs */ { int error = sysfs_create_group(&dev->dev.kobj, &sslsd_attr_group); if (error) { printk("register sysfs for power is failure %d\n", error); } } return 0; l_init: sdhc_exit(&host->hw); l_region: #if IO_MAP == 1 // release_resource(r); iounmap((void *)(host->hw.r - 0x100)); #endif l_host: mmc_free_host(mmc); return ret; }
static int __devinit hi_mci_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct himci_host *host = NULL; int ret = 0, irq; himci_trace(2, "begin"); himci_assert(pdev); mmc = mmc_alloc_host(sizeof(struct himci_host), &pdev->dev); if (!mmc) { himci_error("no mem for hi mci host controller!\n"); ret = -ENOMEM; goto out; } mmc->ops = &hi_mci_ops; mmc->f_min = CONFIG_MMC_CCLK_MIN; mmc->f_max = CONFIG_MMC_CCLK_MAX; mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; /* reload by this controller */ mmc->max_blk_count = 2048; mmc->max_segs = 1024; mmc->max_req_size = 65535;/* see IP manual */ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->ocr = mmc->ocr_avail; host = mmc_priv(mmc); host->dma_vaddr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->dma_paddr, GFP_KERNEL); if (!host->dma_vaddr) { himci_error("no mem for himci dma!\n"); ret = -ENOMEM; goto out; } host->mmc = mmc; host->base = ioremap_nocache(CONFIG_HIMCI_IOBASE, HI_MCI_IO_SIZE); if (!host->base) { himci_error("no mem for himci base!\n"); ret = -ENOMEM; goto out; } /* enable mmc clk */ hi_mci_sys_ctrl_init(host); /* enable card */ spin_lock_init(&host->lock); hi_mci_init_card(host); host->card_status = hi_mci_sys_card_detect(host); init_timer(&host->timer); host->timer.function = hi_mci_detect_card; host->timer.data = (unsigned long)host; host->timer.expires = jiffies + detect_time; platform_set_drvdata(pdev, mmc); mmc_add_host(mmc); add_timer(&host->timer); irq = platform_get_irq(pdev, 0); if (irq < 0) { printk(KERN_ERR "no IRQ defined!\n"); goto out; } init_waitqueue_head(&host->intr_wait); host->irq = irq; ret = request_irq(irq, hisd_irq, 0, DRIVER_NAME, host); if (ret) { printk(KERN_ERR "request_irq error!\n"); goto out; } return 0; out: if (host) { if (host->base) iounmap(host->base); if (host->dma_vaddr) dma_free_coherent(&pdev->dev, PAGE_SIZE, host->dma_vaddr, host->dma_paddr); } if (mmc) mmc_free_host(mmc); return ret; }
static int __init rk28_sdio_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;
	struct mmc_host *mci_host;
	struct rk28_sdio_priv *host_priv;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1,
				DRIVER_NAME))
		return -EBUSY;

	mci_host = mmc_alloc_host(sizeof(struct rk28_sdio_priv), &pdev->dev);
	if (mci_host == NULL) {
		printk("Allocate MCI host fail.\n");
		ret = -ENOMEM;
		goto release_region;
	}

	/* Initialise mci_host */
	mci_host->ops = &rk28_sdio_host_ops;
	mci_host->f_min = 370000;	/* 370 kHz ~ 25 MHz */
	mci_host->f_max = 25000000;
	mci_host->ocr_avail = MMC_VDD_26_27 | MMC_VDD_27_28 | MMC_VDD_28_29 |
			      MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32 |
			      MMC_VDD_32_33 | MMC_VDD_33_34;
	mci_host->max_blk_size = 4095;
	mci_host->max_blk_count = mci_host->max_req_size;
	mci_host->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
#if (SDIO_DATA_WIDTH == SDIO_DATA_WIDTH_4)
	mci_host->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
#endif

	host_priv = mmc_priv(mci_host);
	memset(host_priv, 0, sizeof(struct rk28_sdio_priv));
	host_priv->mmc = mci_host;
	host_priv->iomem_base = (void * __iomem)SDMMC1_BASE_ADDR_VA;
	spin_lock_init(&host_priv->lock);
	host_priv->dma_chan = -1;

	ret = rk28_dma_request(RK28_DMA_SD_MMC1, rk28_sdio_dma_done,
			       host_priv);
	if (ret != 0) {
		printk("Request DMA channel for SDMMC1 failed.\n");
		goto free_host;
	}
	host_priv->dma_chan = RK28_DMA_SD_MMC1;
	rk28_sdio_dma_switch(host_priv, DO_ENABLE, DO_LOCK);

	if (rk28_sdio_hw_init(host_priv) != 0) {
		ret = -ENXIO;
		goto free_host;
	}

	host_priv->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host_priv->irq, rk28_sdio_mci_irq, IRQF_SHARED,
			  mmc_hostname(mci_host), host_priv);
	if (ret != 0) {
		printk("Request IRQ for RK28 Host fail.\n");
		ret = -ENOMEM;
		goto free_host;
	}

	platform_set_drvdata(pdev, mci_host);

	/*
	 * Max frequency of AHB is 166 MHz; to guarantee divider = 6,
	 * we set the max frequency for SDMMC1 to 26 MHz.
	 */
	rockchip_scu_register(SCU_IPID_SDMMC1, SCU_MODE_FREQ, 26, NULL);

	mmc_add_host(mci_host);
	wifi_mmc_host = mci_host;

	rk28_sdio_host_irq_switch(host_priv, DO_ENABLE);

	return 0;

free_host:
	mmc_free_host(mci_host);
release_region:
	release_mem_region(res->start, res->end - res->start + 1);
	return ret;
}
static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(intf); struct mmc_host *mmc; struct ushc_data *ushc; int ret; mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); if (mmc == NULL) return -ENOMEM; ushc = mmc_priv(mmc); usb_set_intfdata(intf, ushc); ushc->usb_dev = usb_dev; ushc->mmc = mmc; spin_lock_init(&ushc->lock); ret = ushc_hw_reset(ushc); if (ret < 0) goto err; /* Read capabilities. */ ret = ushc_hw_get_caps(ushc); if (ret < 0) goto err; mmc->ops = &ushc_ops; mmc->f_min = 400000; mmc->f_max = 50000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; mmc->caps |= (ushc->caps & USHC_GET_CAPS_HIGH_SPD) ? MMC_CAP_SD_HIGHSPEED : 0; mmc->max_seg_size = 512*511; mmc->max_segs = 1; mmc->max_req_size = 512*511; mmc->max_blk_size = 512; mmc->max_blk_count = 511; ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL); if (ushc->int_urb == NULL) { ret = -ENOMEM; goto err; } ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL); if (ushc->int_data == NULL) { ret = -ENOMEM; goto err; } usb_fill_int_urb(ushc->int_urb, ushc->usb_dev, usb_rcvintpipe(usb_dev, intf->cur_altsetting->endpoint[0].desc.bEndpointAddress), ushc->int_data, sizeof(struct ushc_int_data), int_callback, ushc, intf->cur_altsetting->endpoint[0].desc.bInterval); ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL); if (ushc->cbw_urb == NULL) { ret = -ENOMEM; goto err; } ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); if (ushc->cbw == NULL) { ret = -ENOMEM; goto err; } ushc->cbw->signature = USHC_CBW_SIGNATURE; usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2), ushc->cbw, sizeof(struct ushc_cbw), cbw_callback, ushc); ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL); if (ushc->data_urb == NULL) { ret = -ENOMEM; goto err; } ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL); if (ushc->csw_urb == NULL) { ret = -ENOMEM; goto err; } ushc->csw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); if (ushc->csw == NULL) { ret = -ENOMEM; goto err; } usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6), ushc->csw, sizeof(struct ushc_csw), csw_callback, ushc); ret = mmc_add_host(ushc->mmc); if (ret) goto err; ret = usb_submit_urb(ushc->int_urb, GFP_KERNEL); if (ret < 0) { mmc_remove_host(ushc->mmc); goto err; } return 0; err: ushc_clean_up(ushc); return ret; }
static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) { struct mmci_platform_data *plat = dev->dev.platform_data; struct mmci_host *host; struct mmc_host *mmc; int ret; /* must have platform data */ if (!plat) { ret = -EINVAL; goto out; } ret = amba_request_regions(dev, DRIVER_NAME); if (ret) goto out; mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); if (!mmc) { ret = -ENOMEM; goto rel_regions; } host = mmc_priv(mmc); host->mmc = mmc; host->gpio_wp = -ENOSYS; host->gpio_cd = -ENOSYS; host->hw_designer = amba_manf(dev); host->hw_revision = amba_rev(dev); DBG(host, "designer ID = 0x%02x\n", host->hw_designer); DBG(host, "revision = 0x%01x\n", host->hw_revision); host->clk = clk_get(&dev->dev, NULL); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); host->clk = NULL; goto host_free; } ret = clk_enable(host->clk); if (ret) goto clk_free; host->plat = plat; host->mclk = clk_get_rate(host->clk); /* * According to the spec, mclk is max 100 MHz, * so we try to adjust the clock down to this, * (if possible). */ if (host->mclk > 100000000) { ret = clk_set_rate(host->clk, 100000000); if (ret < 0) goto clk_disable; host->mclk = clk_get_rate(host->clk); DBG(host, "eventual mclk rate: %u Hz\n", host->mclk); } host->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!host->base) { ret = -ENOMEM; goto clk_disable; } mmc->ops = &mmci_ops; mmc->f_min = (host->mclk + 511) / 512; mmc->f_max = min(host->mclk, fmax); #ifdef CONFIG_REGULATOR /* If we're using the regulator framework, try to fetch a regulator */ host->vcc = regulator_get(&dev->dev, "vmmc"); if (IS_ERR(host->vcc)) host->vcc = NULL; else { int mask = mmc_regulator_get_ocrmask(host->vcc); if (mask < 0) dev_err(&dev->dev, "error getting OCR mask (%d)\n", mask); else { host->mmc->ocr_avail = (u32) mask; if (plat->ocr_mask) dev_warn(&dev->dev, "Provided ocr_mask/setpower will not be used " "(using regulator instead)\n"); } } #endif /* Fall back to platform data if no regulator is found */ if (host->vcc == NULL) mmc->ocr_avail = plat->ocr_mask; mmc->caps = plat->capabilities; /* * We can do SGIO */ mmc->max_hw_segs = 16; mmc->max_phys_segs = NR_SG; /* * Since we only have a 16-bit data length register, we must * ensure that we don't exceed 2^16-1 bytes in a single request. */ mmc->max_req_size = 65535; /* * Set the maximum segment size. Since we aren't doing DMA * (yet) we are only limited by the data length register. */ mmc->max_seg_size = mmc->max_req_size; /* * Block size can be up to 2048 bytes, but must be a power of two. */ mmc->max_blk_size = 2048; /* * No limit on the number of blocks transferred. 
*/ mmc->max_blk_count = mmc->max_req_size; spin_lock_init(&host->lock); writel(0, host->base + MMCIMASK0); writel(0, host->base + MMCIMASK1); writel(0xfff, host->base + MMCICLEAR); if (gpio_is_valid(plat->gpio_cd)) { ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); if (ret == 0) ret = gpio_direction_input(plat->gpio_cd); if (ret == 0) host->gpio_cd = plat->gpio_cd; else if (ret != -ENOSYS) goto err_gpio_cd; } if (gpio_is_valid(plat->gpio_wp)) { ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); if (ret == 0) ret = gpio_direction_input(plat->gpio_wp); if (ret == 0) host->gpio_wp = plat->gpio_wp; else if (ret != -ENOSYS) goto err_gpio_wp; } ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); if (ret) goto unmap; ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host); if (ret) goto irq0_free; writel(MCI_IRQENABLE, host->base + MMCIMASK0); amba_set_drvdata(dev, mmc); host->oldstat = mmci_get_cd(host->mmc); mmc_add_host(mmc); printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n", mmc_hostname(mmc), amba_rev(dev), amba_config(dev), (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]); init_timer(&host->timer); host->timer.data = (unsigned long)host; host->timer.function = mmci_check_status; host->timer.expires = jiffies + HZ; add_timer(&host->timer); return 0; irq0_free: free_irq(dev->irq[0], host); unmap: if (host->gpio_wp != -ENOSYS) gpio_free(host->gpio_wp); err_gpio_wp: if (host->gpio_cd != -ENOSYS) gpio_free(host->gpio_cd); err_gpio_cd: iounmap(host->base); clk_disable: clk_disable(host->clk); clk_free: clk_put(host->clk); host_free: mmc_free_host(mmc); rel_regions: amba_release_regions(dev); out: return ret; }
/* Probe peripheral for connected cards */ static int __init stmp3xxx_mmc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct stmp3xxxmmc_platform_data *mmc_data; struct stmp3xxx_mmc_host *host; struct mmc_host *mmc; struct resource *r; int err = 0; mmc_data = dev->platform_data; if (mmc_data == NULL) { err = -EINVAL; dev_err(dev, "Missing platform data\n"); goto out; } /* Allocate main MMC host structure */ mmc = mmc_alloc_host(sizeof(struct stmp3xxx_mmc_host), dev); if (!mmc) { dev_err(dev, "Unable to allocate MMC host\n"); err = -ENOMEM; goto out; } host = mmc_priv(mmc); host->read_uA = mmc_data->read_uA; host->write_uA = mmc_data->write_uA; host->regulator = regulator_get(NULL, "mmc_ssp-1"); if (host->regulator && !IS_ERR(host->regulator)) regulator_set_mode(host->regulator, REGULATOR_MODE_NORMAL); else host->regulator = NULL; /* get resources: */ /* * 1. io memory */ r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n"); err = -ENXIO; goto out_res; } host->ssp_base = r->start - STMP3XXX_REGS_PHBASE + STMP3XXX_REGS_BASE; /* * 2. DMA channel */ r = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!r) { dev_err(&pdev->dev, "failed to get IORESOURCE_DMA\n"); err = -ENXIO; goto out_res; } host->dmach = r->start; /* * 3. two IRQs */ host->dmairq = platform_get_irq(pdev, 0); if (host->dmairq < 0) { dev_err(&pdev->dev, "failed to get IORESOURCE_IRQ/0\n"); err = host->dmairq; goto out_res; } host->errirq = platform_get_irq(pdev, 1); if (host->errirq < 0) { dev_err(&pdev->dev, "failed to get IORESOURCE_IRQ/1\n"); err = host->errirq; goto out_res; } /* Set up MMC pins */ if (mmc_data->hw_init) { err = mmc_data->hw_init(); if (err) { dev_err(dev, "MMC HW configuration failed\n"); goto out_res; } } host->mmc = mmc; host->dev = dev; /* Set minimal clock rate */ host->clk = clk_get(dev, "ssp"); if (IS_ERR(host->clk)) { err = PTR_ERR(host->clk); dev_err(dev, "Clocks initialization failed\n"); goto out_clk; } clk_enable(host->clk); stmp3xxx_set_sclk_speed(host, CLOCKRATE_MIN); /* Reset MMC block */ stmp3xxx_mmc_reset(host); /* Enable DMA */ err = stmp3xxx_mmc_dma_init(host, 0); if (err) { dev_err(dev, "DMA init failed\n"); goto out_dma; } /* Set up interrupt handlers */ err = stmp3xxx_mmc_irq_init(host); if (err) { dev_err(dev, "IRQ initialization failed\n"); goto out_irq; } /* Get current card status for further cnanges tracking */ host->present = stmp3xxx_mmc_is_plugged(host); /* Add a card detection polling timer */ init_timer(&host->timer); host->timer.function = stmp3xxx_mmc_detect_poll; host->timer.data = (unsigned long)host; host->timer.expires = jiffies + STMP37XX_MMC_DETECT_TIMEOUT; add_timer(&host->timer); mmc->ops = &stmp3xxx_mmc_ops; mmc->f_min = CLOCKRATE_MIN; mmc->f_max = CLOCKRATE_MAX; mmc->caps = MMC_CAP_4_BIT_DATA; /* Maximum block count requests. */ mmc->max_blk_size = 512; mmc->max_blk_count = SSP_BUFFER_SIZE / 512; mmc->max_hw_segs = SSP_BUFFER_SIZE / 512; mmc->max_phys_segs = SSP_BUFFER_SIZE / 512; mmc->max_req_size = SSP_BUFFER_SIZE; mmc->max_seg_size = SSP_BUFFER_SIZE; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; platform_set_drvdata(pdev, mmc); err = mmc_add_host(mmc); if (err) { dev_err(dev, "Oh God. mmc_add_host failed\n"); goto out_all; } return err; out_all: out_irq: stmp3xxx_mmc_dma_release(host); out_dma: clk_disable(host->clk); out_clk: if (mmc_data->hw_release) mmc_data->hw_release(); out_res: mmc_free_host(mmc); out: return err; }
static int sh_mmcif_probe(struct platform_device *pdev) { int ret = 0, irq[2]; struct mmc_host *mmc; struct sh_mmcif_host *host; struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; struct resource *res; void __iomem *reg; const char *name; irq[0] = platform_get_irq(pdev, 0); irq[1] = platform_get_irq(pdev, 1); if (irq[0] < 0) { dev_err(&pdev->dev, "Get irq error\n"); return -ENXIO; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "platform_get_resource error.\n"); return -ENXIO; } reg = ioremap(res->start, resource_size(res)); if (!reg) { dev_err(&pdev->dev, "ioremap error.\n"); return -ENOMEM; } mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto ealloch; } ret = mmc_of_parse(mmc); if (ret < 0) goto eofparse; host = mmc_priv(mmc); host->mmc = mmc; host->addr = reg; host->timeout = msecs_to_jiffies(10000); host->ccs_enable = !pd || !pd->ccs_unsupported; host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; host->pd = pdev; spin_lock_init(&host->lock); mmc->ops = &sh_mmcif_ops; sh_mmcif_init_ocr(host); mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY; if (pd && pd->caps) mmc->caps |= pd->caps; mmc->max_segs = 32; mmc->max_blk_size = 512; mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; mmc->max_seg_size = mmc->max_req_size; platform_set_drvdata(pdev, host); pm_runtime_enable(&pdev->dev); host->power = false; host->hclk = clk_get(&pdev->dev, NULL); if (IS_ERR(host->hclk)) { ret = PTR_ERR(host->hclk); dev_err(&pdev->dev, "cannot get clock: %d\n", ret); goto eclkget; } ret = sh_mmcif_clk_update(host); if (ret < 0) goto eclkupdate; ret = pm_runtime_resume(&pdev->dev); if (ret < 0) goto eresume; INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); sh_mmcif_sync_reset(host); sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error"; ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host); if (ret) { dev_err(&pdev->dev, "request_irq error (%s)\n", name); goto ereqirq0; } if (irq[1] >= 0) { ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host); if (ret) { dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); goto ereqirq1; } } if (pd && pd->use_cd_gpio) { ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0); if (ret < 0) goto erqcd; } mutex_init(&host->thread_lock); clk_disable_unprepare(host->hclk); ret = mmc_add_host(mmc); if (ret < 0) goto emmcaddh; dev_pm_qos_expose_latency_limit(&pdev->dev, 100); dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); dev_dbg(&pdev->dev, "chip ver H'%04x\n", sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); return ret; emmcaddh: erqcd: if (irq[1] >= 0) free_irq(irq[1], host); ereqirq1: free_irq(irq[0], host); ereqirq0: pm_runtime_suspend(&pdev->dev); eresume: clk_disable_unprepare(host->hclk); eclkupdate: clk_put(host->hclk); eclkget: pm_runtime_disable(&pdev->dev); eofparse: mmc_free_host(mmc); ealloch: iounmap(reg); return ret; }
/* * Probe for the device */ static int __init rk28_sdmmc0_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct rk28mci_host *host; struct resource *res; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME)) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct rk28mci_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; dev_dbg(&pdev->dev, "couldn't allocate mmc host\n"); goto fail6; } mmc->ops = &rk28_sdmmc0_ops; mmc->f_min = FOD_FREQ * 1000; mmc->f_max = SDHC_FPP_FREQ; mmc->ocr_avail = MMC_VDD_27_28|MMC_VDD_28_29|MMC_VDD_29_30|MMC_VDD_30_31 | MMC_VDD_31_32|MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35| MMC_VDD_35_36; ///set valid volage 2.7---3.6 //printk("%s..%s..%d ********host availd ocr=0x%x====xbw===*******************\n",__FUNCTION__,__FILE__,__LINE__, mmc->ocr_avail); mmc->max_blk_size = 4095; mmc->max_blk_count = 1024; mmc->max_req_size = mmc->max_blk_count*512; // 512K mmc->max_seg_size = mmc->max_req_size; host = mmc_priv(mmc); host->mmc = mmc; host->buffer = NULL; host->bus_mode = 0; host->board = pdev->dev.platform_data; host->res = res; testhost = host; sd_cdt_resume = 0; sd_is_suspend_mmc = 0; sd_is_suspend_block = 0; do_Resume = 0; host->cmdr = 0; host->lockscu = 0; //no lock SCU host->cmderror = 0; host->complete_dma = 0; //DMA channel has not request. host->requestDmaError = 0;// setup_timer(&host->switch_timer, rk28_sdmmc0_switch_timer, (unsigned long) host); spin_lock_init(&host->complete_lock); /* android_suspend_lock_t sdmmc0_request_lock; //used to the phase of request. android_suspend_lock_t sdmmc0_full_wake_lock; //used to the phase of removal/insertion, android_suspend_lock_t sdmmc0_rescan_lock; //used to the phase of rescaning the card. android_suspend_lock_t sdmmc0_initSd_lock; //used to the phase of initialization. android_suspend_lock_t sdmmc0_rw_prcess_lock; //used to the phase of Read/Write. */ #ifdef CONFIG_ANDROID_POWER sdmmc0_request_lock.name = "sdmmc-Req"; android_init_suspend_lock(&sdmmc0_request_lock); sdmmc0_full_wake_lock.name = "sdmmc-full-Req"; android_init_suspend_lock(&sdmmc0_full_wake_lock); sdmmc0_rescan_lock.name = "sdmmc-rescan"; android_init_suspend_lock(&sdmmc0_rescan_lock); sdmmc0_initSd_lock.name = "sdmmc-initsd"; android_init_suspend_lock(&sdmmc0_initSd_lock); sdmmc0_rw_prcess_lock.name = "sdmmc-rw"; android_init_suspend_lock(&sdmmc0_rw_prcess_lock); #endif sdmmc0_disable_Irq_ForRemoval = 0; spin_lock_init( &sdmmc0_spinlock); mmc->caps |= ( MMC_CAP_MULTIWRITE | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED); if (1)//if (host->board->wire4) { mmc->caps |= MMC_CAP_4_BIT_DATA; } /* * Map I/O region */ host->baseaddr = (void * __iomem)SDMMC0_BASE_ADDR_VA; // if (!host->baseaddr) { ret = -ENOMEM; goto fail1; } //register SCU rockchip_scu_register( SCU_IPID_SDMMC0, SCU_MODE_FREQ, 50, NULL); /* * Allocate the MCI interrupt */ host->irq = platform_get_irq(pdev, 0); ret = request_irq(host->irq, rk28_sdmmc_irq, IRQF_SHARED, mmc_hostname(mmc), host); if (ret) { printk("%s..%s..%d **********request irq failue====xbw===*******************\n",__FUNCTION__,__FILE__,__LINE__); goto fail0; } platform_set_drvdata(pdev, mmc); //printk("%s..%s..%d **********SD probe Over. 
now it begin to mmc_add_host====xbw===*******************\n",__FUNCTION__,__FILE__,__LINE__); mmc_add_host(mmc); rk28_sdmmc0_add_attr(pdev); return 0; fail0: iounmap(host->baseaddr); fail1: mmc_free_host(mmc); fail6: release_mem_region(res->start, res->end - res->start + 1); dev_err(&pdev->dev, "probe failed, err %d\n", ret); return ret; }
static int __init omap_mmc_probe(struct platform_device *pdev) { struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; struct mmc_host *mmc; struct mmc_omap_host *host = NULL; struct resource *res; int ret = 0, irq; u32 hctl, capa; if (pdata == NULL) { dev_err(&pdev->dev, "Platform Data is missing\n"); return -ENXIO; } if (pdata->nr_slots == 0) { dev_err(&pdev->dev, "No Slots\n"); return -ENXIO; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (res == NULL || irq < 0) return -ENXIO; res = request_mem_region(res->start, res->end - res->start + 1, pdev->name); if (res == NULL) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto err; } host = mmc_priv(mmc); host->mmc = mmc; host->pdata = pdata; host->use_dma = 1; host->dma_ch = -1; host->irq = irq; host->id = pdev->id; host->slot_id = 0; host->mapbase = res->start; host->base = ioremap(host->mapbase, SZ_4K); mmc->ops = &mmc_omap_ops; mmc->f_min = 400000; mmc->f_max = 52000000; sema_init(&host->sem, 1); host->iclk = clk_get(&pdev->dev, "mmchs_ick"); if (IS_ERR(host->iclk)) { ret = PTR_ERR(host->iclk); host->iclk = NULL; goto err; } host->fclk = clk_get(&pdev->dev, "mmchs_fck"); if (IS_ERR(host->fclk)) { ret = PTR_ERR(host->fclk); host->fclk = NULL; clk_put(host->iclk); goto err; } if (clk_enable(host->fclk) != 0) goto err; if (clk_enable(host->iclk) != 0) { clk_disable(host->fclk); clk_put(host->fclk); goto err; } host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); /* * MMC can still work without debounce clock. */ if (IS_ERR(host->dbclk)) dev_dbg(mmc_dev(host->mmc), "Failed to get debounce clock\n"); else if (clk_enable(host->dbclk) != 0) dev_dbg(mmc_dev(host->mmc), "Enabling debounce" " clk failed\n"); else host->dbclk_enabled = 1; mmc->ocr_avail = mmc_slot(host).ocr_mask; mmc->caps |= MMC_CAP_MULTIWRITE | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; if (pdata->conf.wire4) mmc->caps |= MMC_CAP_4_BIT_DATA; /* Only MMC1 supports 3.0V */ if (host->id == OMAP_MMC1_DEVID) { hctl = SDVS30; capa = VS30 | VS18; } else { hctl = SDVS18; capa = VS18; } OMAP_HSMMC_WRITE(host->base, HCTL, OMAP_HSMMC_READ(host->base, HCTL) | hctl); OMAP_HSMMC_WRITE(host->base, CAPA, OMAP_HSMMC_READ(host->base, CAPA) | capa); /* Set the controller to AUTO IDLE mode */ OMAP_HSMMC_WRITE(host->base, SYSCONFIG, OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE); /* Set SD bus power bit */ OMAP_HSMMC_WRITE(host->base, HCTL, OMAP_HSMMC_READ(host->base, HCTL) | SDBP); /* Request IRQ for MMC operations */ ret = request_irq(host->irq, mmc_omap_irq, IRQF_DISABLED, pdev->name, host); if (ret) { dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); goto irq_err; } /* Request IRQ for card detect */ if ((mmc_slot(host).card_detect_irq) && (mmc_slot(host).card_detect)) { ret = request_irq(mmc_slot(host).card_detect_irq, omap_mmc_cd_handler, IRQF_DISABLED, "MMC CD", host); if (ret) { dev_dbg(mmc_dev(host->mmc), "Unable to grab MMC CD IRQ"); free_irq(host->irq, host); goto irq_err; } } INIT_WORK(&host->mmc_carddetect_work, mmc_omap_detect); if (pdata->init != NULL) { if (pdata->init(&pdev->dev) != 0) { free_irq(mmc_slot(host).card_detect_irq, host); free_irq(host->irq, host); goto irq_err; } } OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); platform_set_drvdata(pdev, host); mmc_add_host(mmc); return 0; err: dev_dbg(mmc_dev(host->mmc), "Probe Failed\n"); if (host) mmc_free_host(mmc); return ret; irq_err: 
dev_dbg(mmc_dev(host->mmc), "Unable to configure MMC IRQs\n"); clk_disable(host->fclk); clk_disable(host->iclk); clk_put(host->fclk); clk_put(host->iclk); if (host->dbclk_enabled) { clk_disable(host->dbclk); clk_put(host->dbclk); } if (host) mmc_free_host(mmc); return ret; }
static int __init omap_hsmmc_probe(struct platform_device *pdev) { struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; struct mmc_host *mmc; struct omap_hsmmc_host *host = NULL; struct resource *res; int ret = 0, irq; if (pdata == NULL) { dev_err(&pdev->dev, "Platform Data is missing\n"); return -ENXIO; } if (pdata->nr_slots == 0) { dev_err(&pdev->dev, "No Slots\n"); return -ENXIO; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (res == NULL || irq < 0) return -ENXIO; res = request_mem_region(res->start, res->end - res->start + 1, pdev->name); if (res == NULL) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto err; } host = mmc_priv(mmc); host->mmc = mmc; host->pdata = pdata; host->dev = &pdev->dev; host->use_dma = 1; host->dev->dma_mask = &pdata->dma_mask; host->dma_ch = -1; host->irq = irq; host->id = pdev->id; host->slot_id = 0; host->mapbase = res->start; host->base = ioremap(host->mapbase, SZ_4K); host->power_mode = -1; platform_set_drvdata(pdev, host); INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect); if (mmc_slot(host).power_saving) mmc->ops = &omap_hsmmc_ps_ops; else mmc->ops = &omap_hsmmc_ops; mmc->f_min = 400000; mmc->f_max = 52000000; sema_init(&host->sem, 1); spin_lock_init(&host->irq_lock); host->iclk = clk_get(&pdev->dev, "ick"); if (IS_ERR(host->iclk)) { ret = PTR_ERR(host->iclk); host->iclk = NULL; goto err1; } host->fclk = clk_get(&pdev->dev, "fck"); if (IS_ERR(host->fclk)) { ret = PTR_ERR(host->fclk); host->fclk = NULL; clk_put(host->iclk); goto err1; } omap_hsmmc_context_save(host); mmc->caps |= MMC_CAP_DISABLE; mmc_set_disable_delay(mmc, OMAP_MMC_DISABLED_TIMEOUT); /* we start off in DISABLED state */ host->dpm_state = DISABLED; if (mmc_host_enable(host->mmc) != 0) { clk_put(host->iclk); clk_put(host->fclk); goto err1; } if (clk_enable(host->iclk) != 0) { mmc_host_disable(host->mmc); clk_put(host->iclk); clk_put(host->fclk); goto err1; } if (cpu_is_omap2430()) { host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); /* * MMC can still work without debounce clock. */ if (IS_ERR(host->dbclk)) dev_warn(mmc_dev(host->mmc), "Failed to get debounce clock\n"); else host->got_dbclk = 1; if (host->got_dbclk) if (clk_enable(host->dbclk) != 0) dev_dbg(mmc_dev(host->mmc), "Enabling debounce" " clk failed\n"); } /* Since we do only SG emulation, we can have as many segs * as we want. */ mmc->max_phys_segs = 1024; mmc->max_hw_segs = 1024; mmc->max_blk_size = 512; /* Block Length at max can be 1024 */ mmc->max_blk_count = 0xFFFF; /* No. 
of Blocks is 16 bits */ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY; if (mmc_slot(host).wires >= 8) mmc->caps |= MMC_CAP_8_BIT_DATA; else if (mmc_slot(host).wires >= 4) mmc->caps |= MMC_CAP_4_BIT_DATA; if (mmc_slot(host).nonremovable) mmc->caps |= MMC_CAP_NONREMOVABLE; omap_hsmmc_conf_bus_power(host); /* Select DMA lines */ switch (host->id) { case OMAP_MMC1_DEVID: host->dma_line_tx = OMAP24XX_DMA_MMC1_TX; host->dma_line_rx = OMAP24XX_DMA_MMC1_RX; break; case OMAP_MMC2_DEVID: host->dma_line_tx = OMAP24XX_DMA_MMC2_TX; host->dma_line_rx = OMAP24XX_DMA_MMC2_RX; break; case OMAP_MMC3_DEVID: host->dma_line_tx = OMAP34XX_DMA_MMC3_TX; host->dma_line_rx = OMAP34XX_DMA_MMC3_RX; break; case OMAP_MMC4_DEVID: host->dma_line_tx = OMAP44XX_DMA_MMC4_TX; host->dma_line_rx = OMAP44XX_DMA_MMC4_RX; break; case OMAP_MMC5_DEVID: host->dma_line_tx = OMAP44XX_DMA_MMC5_TX; host->dma_line_rx = OMAP44XX_DMA_MMC5_RX; break; default: dev_err(mmc_dev(host->mmc), "Invalid MMC id\n"); goto err_irq; } /* Request IRQ for MMC operations */ ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED, mmc_hostname(mmc), host); if (ret) { dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); goto err_irq; } /* initialize power supplies, gpios, etc */ if (pdata->init != NULL) { if (pdata->init(&pdev->dev) != 0) { dev_dbg(mmc_dev(host->mmc), "Unable to configure MMC IRQs\n"); goto err_irq_cd_init; } } mmc->ocr_avail = mmc_slot(host).ocr_mask; /* Request IRQ for card detect */ if ((mmc_slot(host).card_detect_irq)) { ret = request_irq(mmc_slot(host).card_detect_irq, omap_hsmmc_cd_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_DISABLED, mmc_hostname(mmc), host); if (ret) { dev_dbg(mmc_dev(host->mmc), "Unable to grab MMC CD IRQ\n"); goto err_irq_cd; } } OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); mmc_host_lazy_disable(host->mmc); omap_hsmmc_protect_card(host); mmc_add_host(mmc); if (mmc_slot(host).name != NULL) { ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name); if (ret < 0) goto err_slot_name; } if (mmc_slot(host).card_detect_irq && mmc_slot(host).get_cover_state) { ret = device_create_file(&mmc->class_dev, &dev_attr_cover_switch); if (ret < 0) goto err_cover_switch; } omap_hsmmc_debugfs(mmc); return 0; err_cover_switch: device_remove_file(&mmc->class_dev, &dev_attr_cover_switch); err_slot_name: mmc_remove_host(mmc); err_irq_cd: free_irq(mmc_slot(host).card_detect_irq, host); err_irq_cd_init: free_irq(host->irq, host); err_irq: mmc_host_disable(host->mmc); clk_disable(host->iclk); clk_put(host->fclk); clk_put(host->iclk); if (host->got_dbclk) { clk_disable(host->dbclk); clk_put(host->dbclk); } err1: iounmap(host->base); err: dev_dbg(mmc_dev(host->mmc), "Probe Failed\n"); release_mem_region(res->start, res->end - res->start + 1); if (host) mmc_free_host(mmc); return ret; }
static int mmci_probe(struct amba_device *dev, void *id) { struct mmc_platform_data *plat = dev->dev.platform_data; struct mmci_host *host; struct mmc_host *mmc; int ret; /* must have platform data */ if (!plat) { ret = -EINVAL; goto out; } ret = amba_request_regions(dev, DRIVER_NAME); if (ret) goto out; mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); if (!mmc) { ret = -ENOMEM; goto rel_regions; } host = mmc_priv(mmc); host->clk = clk_get(&dev->dev, "MCLK"); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); host->clk = NULL; goto host_free; } ret = clk_enable(host->clk); if (ret) goto clk_free; host->plat = plat; host->mclk = clk_get_rate(host->clk); /* * According to the spec, mclk is max 100 MHz, * so we try to adjust the clock down to this, * (if possible). */ if (host->mclk > 100000000) { ret = clk_set_rate(host->clk, 100000000); if (ret < 0) goto clk_disable; host->mclk = clk_get_rate(host->clk); DBG(host, "eventual mclk rate: %u Hz\n", host->mclk); } host->mmc = mmc; host->base = ioremap(dev->res.start, SZ_4K); if (!host->base) { ret = -ENOMEM; goto clk_disable; } mmc->ops = &mmci_ops; mmc->f_min = (host->mclk + 511) / 512; mmc->f_max = min(host->mclk, fmax); mmc->ocr_avail = plat->ocr_mask; /* * We can do SGIO */ mmc->max_hw_segs = 16; mmc->max_phys_segs = NR_SG; /* * Since we only have a 16-bit data length register, we must * ensure that we don't exceed 2^16-1 bytes in a single request. */ mmc->max_req_size = 65535; /* * Set the maximum segment size. Since we aren't doing DMA * (yet) we are only limited by the data length register. */ mmc->max_seg_size = mmc->max_req_size; /* * Block size can be up to 2048 bytes, but must be a power of two. */ mmc->max_blk_size = 2048; /* * No limit on the number of blocks transferred. */ mmc->max_blk_count = mmc->max_req_size; spin_lock_init(&host->lock); writel(0, host->base + MMCIMASK0); writel(0, host->base + MMCIMASK1); writel(0xfff, host->base + MMCICLEAR); ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); if (ret) goto unmap; ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host); if (ret) goto irq0_free; writel(MCI_IRQENABLE, host->base + MMCIMASK0); amba_set_drvdata(dev, mmc); mmc_add_host(mmc); printk(KERN_INFO "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n", mmc_hostname(mmc), amba_rev(dev), amba_config(dev), (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]); init_timer(&host->timer); host->timer.data = (unsigned long)host; host->timer.function = mmci_check_status; host->timer.expires = jiffies + HZ; add_timer(&host->timer); return 0; irq0_free: free_irq(dev->irq[0], host); unmap: iounmap(host->base); clk_disable: clk_disable(host->clk); clk_free: clk_put(host->clk); host_free: mmc_free_host(mmc); rel_regions: amba_release_regions(dev); out: return ret; }
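/*
 * In mmci_probe() above, mmc->f_min = (host->mclk + 511) / 512 is simply a
 * ceiling division by 512: rounding up keeps f_min at a rate the divider can
 * still reach when the MMC core asks for the slowest clock. The factor of
 * 512 is an assumption about the PL18x divider range (roughly
 * mclk / (2 * (255 + 1)) at its slowest), and f_max is just min(mclk, fmax),
 * where fmax is defined elsewhere in the driver. A standalone check of the
 * rounding identity:
 */
#include <assert.h>

static unsigned int ceil_div(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;			/* round up, not down */
}

int main(void)
{
	unsigned int mclk = 100000000;		/* example: 100 MHz MCLK */

	assert((mclk + 511) / 512 == ceil_div(mclk, 512));
	return 0;
}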
static int __devinit ak98_sdio_probe(struct platform_device *pdev) { struct ak98_mci_platform_data *plat = pdev->dev.platform_data; struct ak98_mci_host *host; struct mmc_host *mmc; struct resource *res; int irq; int ret; /* must have platform data */ if (!plat) { ret = -EINVAL; goto out; } PK("%s\n", __func__); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); PK("res: %x, %u", res->start, resource_size(res)); res = request_mem_region(res->start, resource_size(res), DRIVER_NAME); if (!res) { ret = -EBUSY; goto out; } PK("res: %x, %u\n", res->start, resource_size(res)); mmc = mmc_alloc_host(sizeof(struct ak98_mci_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out; } host = mmc_priv(mmc); host->mmc = mmc; host->gpio_wp = -ENOSYS; host->gpio_cd = -ENOSYS; ak98_sdio_reset(); host->clk = clk_get(&pdev->dev, "sdio_clk"); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); host->clk = NULL; goto host_free; } ret = clk_enable(host->clk); if (ret) goto clk_free; host->plat = plat; host->asic_clkrate = ak98_get_asic_clk(); host->base = ioremap(res->start, resource_size(res)); if (!host->base) { ret = -ENOMEM; goto clk_disable; } PK("asic_clkrate: %luhz,host->base=0x%x\n", host->asic_clkrate,host->base); mmc->ops = &ak98_mci_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = MMC_CAP_4_BIT_DATA; #if 0 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; #endif mmc->caps |= MMC_CAP_SDIO_IRQ; // mmc->caps |= MMC_CAP_NEEDS_POLL; mmc->f_min = host->asic_clkrate / (255+1 + 255+1); mmc->f_max = host->asic_clkrate / (0+1 + 0+1); mmc->f_max = mmc->f_max < fmax ? mmc->f_max : fmax; /* * We can do SGIO */ mmc->max_hw_segs = 16; mmc->max_phys_segs = NR_SG; /* * Since we only have a 16-bit data length register, we must * ensure that we don't exceed 2^16-1 bytes in a single request. */ mmc->max_req_size = 65535; /* * Set the maximum segment size. Since we aren't doing DMA * (yet) we are only limited by the data length register. */ mmc->max_seg_size = mmc->max_req_size; #if 0 /* * Block size can be up to 2048 bytes, but must be a power of two. */ mmc->max_blk_size = 2048; #else /* as l2 fifo limit to 512 bytes */ mmc->max_blk_size = 512; #endif /* * No limit on the number of blocks transferred. 
*/ mmc->max_blk_count = mmc->max_req_size; spin_lock_init(&host->lock); ak98_group_config(ePIN_AS_SDIO); writel(MCI_ENABLE|MCI_FAIL_TRIGGER, host->base + AK98MCICLOCK); PK("%s: MCICLOCK: 0x%08x\n", __func__, readl(host->base + AK98MCICLOCK)); writel(0, host->base + AK98MCIMASK); PK("request irq %i\n", irq); ret = request_irq(irq, ak98_sdio_irq, IRQF_DISABLED, DRIVER_NAME " (cmd)", host); if (ret) goto unmap; host->irq_mci = irq; if(plat->gpio_cd >= 0) { host->gpio_cd = plat->gpio_cd; ak98_gpio_cfgpin(host->gpio_cd, AK98_GPIO_DIR_INPUT); ak98_gpio_pulldown(host->gpio_cd,AK98_PULLDOWN_DISABLE); ak98_gpio_pullup(host->gpio_cd, AK98_PULLUP_ENABLE); setup_timer(&host->detect_timer, ak98_sdio_detect_change, (unsigned long)host); irq = ak98_gpio_to_irq(host->gpio_cd); ret = request_irq(irq, ak98_sdio_card_detect_irq, IRQF_DISABLED, DRIVER_NAME " cd", host); printk("request gpio irq ret = %d, irq=%d", ret, irq); if (ret) goto irq_free; host->irq_cd = irq; host->irq_cd_type = IRQ_TYPE_LEVEL_LOW; } if(plat->gpio_wp >= 0) { host->gpio_wp = plat->gpio_wp; ak98_gpio_cfgpin(host->gpio_wp, AK98_GPIO_DIR_INPUT); ak98_gpio_pullup(host->gpio_wp, AK98_PULLUP_ENABLE); ak98_gpio_pulldown(host->gpio_wp,AK98_PULLDOWN_DISABLE); } platform_set_drvdata(pdev, mmc); ret = ak98sdio_cpufreq_register(host); if (ret) { goto irq_free; } ret = mmc_add_host(mmc); if (ret) { goto cpufreq_free; } PK(KERN_INFO "%s: ak98MCI at 0x%016llx irq %d\n", mmc_hostname(mmc), (unsigned long long)res->start, host->irq_mci); return 0; cpufreq_free: PK("ERR cpufreq_free\n"); ak98sdio_cpufreq_deregister(host); irq_free: PK("ERR irq_free\n"); free_irq(host->irq_mci, host); unmap: PK("ERR unmap\n"); iounmap(host->base); clk_disable: PK("ERR clk_disable\n"); clk_disable(host->clk); clk_free: PK("ERR clk_free\n"); clk_put(host->clk); host_free: PK("ERR host_free\n"); mmc_free_host(mmc); out: PK("ERR out\n"); return ret; }
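/*
 * The clock limits in ak98_sdio_probe() above reduce to
 * f_min = asic_clkrate / 512 (both divider terms at their apparent maximum
 * of 255) and f_max = asic_clkrate / 2 (both at 0), with f_max then clamped
 * to the driver-level 'fmax' limit; the open-coded conditional is just
 * min(). A hedged restatement, using hypothetical helper names:
 */
static unsigned long ak98_min_rate(unsigned long asic_clkrate)
{
	return asic_clkrate / ((255 + 1) + (255 + 1));		/* == asic_clkrate / 512 */
}

static unsigned long ak98_max_rate(unsigned long asic_clkrate, unsigned long fmax)
{
	unsigned long f = asic_clkrate / ((0 + 1) + (0 + 1));	/* == asic_clkrate / 2 */

	return f < fmax ? f : fmax;				/* i.e. min(f, fmax) */
}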
static int s3c_hsmmc_probe (struct platform_device *pdev) { struct mmc_host *mmc; struct s3c_hsmmc_host *host; struct s3c_hsmmc_cfg *plat_data; uint i; int ret; mmc = mmc_alloc_host(sizeof(struct s3c_hsmmc_host), &pdev->dev); if (mmc==NULL) { ret = -ENOMEM; printk("Failed to get mmc_alloc_host.\n"); return ret; } plat_data = s3c_hsmmc_get_platdata(&pdev->dev); host = mmc_priv(mmc); host->mmc = mmc; host->plat_data = plat_data; host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!host->mem) { printk("Failed to get io memory region resouce.\n"); ret = -ENOENT; goto probe_free_host; } host->mem = request_mem_region(host->mem->start, RESSIZE(host->mem), pdev->name); if (!host->mem) { printk("Failed to request io memory region.\n"); ret = -ENOENT; goto probe_free_host; } host->base = ioremap(host->mem->start, (host->mem->end - host->mem->start)+1); if (host->base == NULL) { printk(KERN_ERR "Failed to remap register block\n"); return -ENOMEM; } host->irq = platform_get_irq(pdev, 0); if (host->irq == 0) { printk("Failed to get interrupt resouce.\n"); ret = -EINVAL; goto untasklet; } #if defined(CONFIG_CPU_S3C6410) /* To detect a card inserted on channel 0, an external interrupt is used. */ if ((plat_data->enabled == 1) && (plat_data->hwport == 0)) { host->irq_cd = platform_get_irq(pdev, 1); if (host->irq_cd == 0) { printk("Failed to get interrupt resouce.\n"); ret = -EINVAL; goto untasklet; } set_irq_type(host->irq_cd, IRQ_TYPE_LEVEL_LOW); } #endif host->flags |= S3C_HSMMC_USE_DMA; s3c_hsmmc_reset(host, S3C_HSMMC_RESET_ALL); clk_enable(clk_get(&pdev->dev, "hsmmc")); /* register some clock sources if exist */ for (i=0; i<NUM_OF_HSMMC_CLKSOURCES; i++) { host->clk[i] = clk_get(&pdev->dev, plat_data->clocks[i].name); if (IS_ERR(host->clk[i])) { ret = PTR_ERR(host->clk[i]); host->clk[i] = ERR_PTR(-ENOENT); } if (clk_enable(host->clk[i])) { host->clk[i] = ERR_PTR(-ENOENT); } if (!IS_ERR(host->clk[i])) { DBG("MMC clock source[%d], %s is %ld Hz\n",i, plat_data->clocks[i].name, clk_get_rate(host->clk[i])); } } mmc->ops = &s3c_hsmmc_ops; mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; mmc->f_min = 400 * 1000; /* at least 400kHz */ /* you must make sure that our hsmmc block can support * up to 52MHz. */ mmc->f_max = 100 * MHZ; mmc->caps = plat_data->host_caps; DBG("mmc->caps: %08x\n", mmc->caps); spin_lock_init(&host->lock); /* * Maximum number of segments. Hardware cannot do scatter lists. * XXX: must modify later. */ #ifdef CONFIG_HSMMC_SCATTERGATHER mmc->max_hw_segs = CONFIG_S3C_HSMMC_MAX_HW_SEGS; mmc->max_phys_segs = CONFIG_S3C_HSMMC_MAX_HW_SEGS; #else mmc->max_hw_segs = 1; #endif /* * Maximum segment size. Could be one segment with the maximum number * of sectors. */ mmc->max_blk_size = 512; mmc->max_seg_size = 128 * mmc->max_blk_size; mmc->max_blk_count = 128; mmc->max_req_size = mmc->max_seg_size; init_timer(&host->timer); host->timer.data = (unsigned long)host; host->timer.function = s3c_hsmmc_check_status; host->timer.expires = jiffies + HZ; /* * Init tasklets. 
*/ tasklet_init(&host->card_tasklet, s3c_hsmmc_tasklet_card, (unsigned long)host); tasklet_init(&host->finish_tasklet, s3c_hsmmc_tasklet_finish, (unsigned long)host); ret = request_irq(host->irq, s3c_hsmmc_irq, 0, DRIVER_NAME, host); if (ret) goto untasklet; #if defined(CONFIG_CPU_S3C6410) if ((plat_data->enabled == 1) && (plat_data->hwport == 0)) { ret = request_irq(host->irq_cd, s3c_hsmmc_irq_cd, 0, DRIVER_NAME, host); if (ret) goto untasklet; } #endif s3c_hsmmc_ios_init(host); mmc_add_host(mmc); #if defined(CONFIG_PM) global_host[plat_data->hwport] = host; #endif printk(KERN_INFO "[s3c_hsmmc_probe]: %s.%d: at 0x%p with irq %d. clk src:", pdev->name, pdev->id, host->base, host->irq); for (i=0; i<NUM_OF_HSMMC_CLKSOURCES; i++) { if (!IS_ERR(host->clk[i])) printk(" %s", host->clk[i]->name); } printk("\n"); return 0; untasklet: tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); for (i=0; i<NUM_OF_HSMMC_CLKSOURCES; i++) { if (host->clk[i] != ERR_PTR(-ENOENT)) { clk_disable(host->clk[i]); clk_put(host->clk[i]); } } probe_free_host: mmc_free_host(mmc); return ret; }
static int mmci_driver_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *devid) { int rc; u32 sdi; virtual_addr_t base; physical_addr_t basepa; struct mmc_host *mmc; struct mmci_host *host; mmc = mmc_alloc_host(sizeof(struct mmci_host), dev); if (!mmc) { rc = VMM_ENOMEM; goto free_nothing; } host = mmc_priv(mmc); rc = vmm_devtree_regmap(dev->node, &base, 0); if (rc) { goto free_host; } host->base = (struct sdi_registers *)base; rc = vmm_devtree_irq_get(dev->node, &host->irq0, 0); if (rc) { goto free_reg; } if ((rc = vmm_host_irq_register(host->irq0, dev->name, mmci_cmd_irq_handler, mmc))) { goto free_reg; } rc = vmm_devtree_irq_get(dev->node, &host->irq1, 1); if (!rc) { if ((rc = vmm_host_irq_register(host->irq1, dev->name, mmci_pio_irq_handler, mmc))) { goto free_irq0; } host->singleirq = 0; } else { host->singleirq = 1; } /* Retrive matching data */ host->pwr_init = ((const u32 *)devid->data)[0]; host->clkdiv_init = ((const u32 *)devid->data)[1]; host->voltages = ((const u32 *)devid->data)[2]; host->caps = ((const u32 *)devid->data)[3]; host->clock_in = ((const u32 *)devid->data)[4]; host->clock_min = ((const u32 *)devid->data)[5]; host->clock_max = ((const u32 *)devid->data)[6]; host->b_max = ((const u32 *)devid->data)[7]; host->version2 = ((const u32 *)devid->data)[8]; /* Initialize power and clock divider */ vmm_writel(host->pwr_init, &host->base->power); vmm_writel(host->clkdiv_init, &host->base->clock); vmm_udelay(CLK_CHANGE_DELAY); /* Disable interrupts */ sdi = vmm_readl(&host->base->mask0) & ~SDI_MASK0_MASK; vmm_writel(sdi, &host->base->mask0); /* Setup mmc host configuration */ mmc->caps = host->caps; mmc->voltages = host->voltages; mmc->f_min = host->clock_min; mmc->f_max = host->clock_max; mmc->b_max = host->b_max; /* Setup mmc host operations */ mmc->ops.send_cmd = mmci_request; mmc->ops.set_ios = mmci_set_ios; mmc->ops.init_card = mmci_init_card; mmc->ops.get_cd = NULL; mmc->ops.get_wp = NULL; rc = mmc_add_host(mmc); if (rc) { goto free_irq1; } dev->priv = mmc; vmm_devtree_regaddr(dev->node, &basepa, 0); vmm_printf("%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n", dev->name, amba_part(dev), amba_manf(dev), amba_rev(dev), (unsigned long long)basepa, host->irq0, host->irq1); return VMM_OK; free_irq1: if (!host->singleirq) { vmm_host_irq_unregister(host->irq1, mmc); } free_irq0: vmm_host_irq_unregister(host->irq0, mmc); free_reg: vmm_devtree_regunmap(dev->node, (virtual_addr_t)host->base, 0); free_host: mmc_free_host(mmc); free_nothing: return rc; }
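/*
 * mmci_driver_probe() above unpacks nine u32 values out of devid->data by
 * numeric index, which makes the meaning of each slot easy to lose. A hedged
 * alternative, assuming the match data really is laid out as nine
 * consecutive u32s, is to describe the blob with a named struct so each
 * field documents itself ('struct mmci_match_data' is hypothetical):
 */
struct mmci_match_data {
	u32 pwr_init;		/* index 0 above */
	u32 clkdiv_init;	/* index 1 */
	u32 voltages;		/* index 2 */
	u32 caps;		/* index 3 */
	u32 clock_in;		/* index 4 */
	u32 clock_min;		/* index 5 */
	u32 clock_max;		/* index 6 */
	u32 b_max;		/* index 7 */
	u32 version2;		/* index 8 */
};

/*
 * Usage sketch:
 *	const struct mmci_match_data *md = devid->data;
 *	host->pwr_init = md->pwr_init;
 *	...
 */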
static int mxs_mmc_probe(struct platform_device *pdev) { const struct of_device_id *of_id = of_match_device(mxs_mmc_dt_ids, &pdev->dev); struct device_node *np = pdev->dev.of_node; struct mxs_mmc_host *host; struct mmc_host *mmc; struct resource *iores; int ret = 0, irq_err; struct regulator *reg_vmmc; struct mxs_ssp *ssp; irq_err = platform_get_irq(pdev, 0); if (irq_err < 0) return irq_err; mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev); if (!mmc) return -ENOMEM; host = mmc_priv(mmc); ssp = &host->ssp; ssp->dev = &pdev->dev; iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); ssp->base = devm_ioremap_resource(&pdev->dev, iores); if (IS_ERR(ssp->base)) { ret = PTR_ERR(ssp->base); goto out_mmc_free; } ssp->devid = (enum mxs_ssp_id) of_id->data; host->mmc = mmc; host->sdio_irq_en = 0; reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc"); if (!IS_ERR(reg_vmmc)) { ret = regulator_enable(reg_vmmc); if (ret) { dev_err(&pdev->dev, "Failed to enable vmmc regulator: %d\n", ret); goto out_mmc_free; } } ssp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(ssp->clk)) { ret = PTR_ERR(ssp->clk); goto out_mmc_free; } ret = clk_prepare_enable(ssp->clk); if (ret) goto out_mmc_free; ret = mxs_mmc_reset(host); if (ret) { dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret); goto out_clk_disable; } ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx"); if (!ssp->dmach) { dev_err(mmc_dev(host->mmc), "%s: failed to request dma\n", __func__); ret = -ENODEV; goto out_clk_disable; } /* set mmc core parameters */ mmc->ops = &mxs_mmc_ops; mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL; host->broken_cd = of_property_read_bool(np, "broken-cd"); mmc->f_min = 400000; mmc->f_max = 288000000; ret = mmc_of_parse(mmc); if (ret) goto out_clk_disable; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->max_segs = 52; mmc->max_blk_size = 1 << 0xf; mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff; mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff; mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev); platform_set_drvdata(pdev, mmc); ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, dev_name(&pdev->dev), host); if (ret) goto out_free_dma; spin_lock_init(&host->lock); ret = mmc_add_host(mmc); if (ret) goto out_free_dma; dev_info(mmc_dev(host->mmc), "initialized\n"); return 0; out_free_dma: dma_release_channel(ssp->dmach); out_clk_disable: clk_disable_unprepare(ssp->clk); out_mmc_free: mmc_free_host(mmc); return ret; }
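/*
 * mxs_mmc_probe() above leans on devm_*-managed resources (the register
 * mapping, the vmmc regulator handle, the clock handle, the IRQ), so its
 * error path only has to undo what is not managed: the DMA channel, the
 * clock enable, and the mmc host allocation itself. One readability note:
 * mmc->max_blk_size is written as 1 << 0xf, i.e. 32768 bytes. A trivial
 * standalone check of that constant:
 */
_Static_assert((1 << 0xf) == 32768, "mxs_mmc max_blk_size is 32 KiB");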
static int __devinit sunximmc_probe(struct platform_device *pdev) { struct sunxi_mmc_host *smc_host = NULL; struct mmc_host *mmc = NULL; int ret = 0; char mmc_para[16] = {0}; int card_detmode = 0; SMC_MSG("%s: pdev->name: %s, pdev->id: %d\n", dev_name(&pdev->dev), pdev->name, pdev->id); mmc = mmc_alloc_host(sizeof(struct sunxi_mmc_host), &pdev->dev); if (!mmc) { SMC_ERR("mmc alloc host failed\n"); ret = -ENOMEM; goto probe_out; } smc_host = mmc_priv(mmc); memset((void*)smc_host, 0, sizeof(smc_host)); smc_host->mmc = mmc; smc_host->pdev = pdev; spin_lock_init(&smc_host->lock); tasklet_init(&smc_host->tasklet, sunximmc_tasklet, (unsigned long) smc_host); smc_host->cclk = 400000; smc_host->mod_clk = SMC_MAX_MOD_CLOCK(pdev->id); smc_host->clk_source = SMC_MOD_CLK_SRC(pdev->id); mmc->ops = &sunximmc_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = MMC_CAP_4_BIT_DATA|MMC_CAP_MMC_HIGHSPEED|MMC_CAP_SD_HIGHSPEED|MMC_CAP_SDIO_IRQ; mmc->f_min = 400000; mmc->f_max = SMC_MAX_IO_CLOCK(pdev->id); #ifdef MMC_PM_IGNORE_PM_NOTIFY if (pdev->id==3 && !mmc_pm_io_shd_suspend_host()) mmc->pm_flags = MMC_PM_IGNORE_PM_NOTIFY; #endif mmc->max_blk_count = 4095; mmc->max_blk_size = 4095; mmc->max_req_size = 4095 * 512; //32bit byte counter = 2^32 - 1 mmc->max_seg_size = mmc->max_req_size; mmc->max_segs = 256; if (sunximmc_resource_request(smc_host)) { SMC_ERR("%s: Failed to get resouce.\n", dev_name(&pdev->dev)); goto probe_free_host; } if (sunximmc_set_src_clk(smc_host)) { goto probe_free_host; } sunximmc_init_controller(smc_host); smc_host->power_on = 1; sunximmc_procfs_attach(smc_host); /* irq */ smc_host->irq = platform_get_irq(pdev, 0); if (smc_host->irq == 0) { dev_err(&pdev->dev, "Failed to get interrupt resouce.\n"); ret = -EINVAL; goto probe_free_resource; } if (request_irq(smc_host->irq, sunximmc_irq, 0, DRIVER_NAME, smc_host)) { dev_err(&pdev->dev, "Failed to request smc card interrupt.\n"); ret = -ENOENT; goto probe_free_irq; } disable_irq(smc_host->irq); /* add host */ ret = mmc_add_host(mmc); if (ret) { dev_err(&pdev->dev, "Failed to add mmc host.\n"); goto probe_free_irq; } platform_set_drvdata(pdev, mmc); //fetch card detecetd mode sprintf(mmc_para, "mmc%d_para", pdev->id); ret = script_parser_fetch(mmc_para, "sdc_detmode", &card_detmode, sizeof(int)); if (ret) { SMC_ERR("sdc fetch card detect mode failed\n"); } smc_host->cd_mode = card_detmode; if (smc_host->cd_mode == CARD_DETECT_BY_GPIO) { //initial card detect timer init_timer(&smc_host->cd_timer); smc_host->cd_timer.expires = jiffies + 1*HZ; smc_host->cd_timer.function = &sunximmc_cd_timer; smc_host->cd_timer.data = (unsigned long)smc_host; add_timer(&smc_host->cd_timer); smc_host->present = 0; } enable_irq(smc_host->irq); mutex_lock(&sw_host_rescan_mutex); if (smc_host->cd_mode == CARD_ALWAYS_PRESENT || sw_host_rescan_pending[pdev->id]) { smc_host->present = 1; mmc_detect_change(smc_host->mmc, msecs_to_jiffies(300)); } sw_host[pdev->id] = smc_host; mutex_unlock(&sw_host_rescan_mutex); SMC_MSG("mmc%d Probe: base:0x%p irq:%u dma:%u pdes:0x%p, ret %d.\n", pdev->id, smc_host->smc_base, smc_host->irq, smc_host->dma_no, smc_host->pdes, ret); goto probe_out; probe_free_irq: if (smc_host->irq) { free_irq(smc_host->irq, smc_host); } probe_free_resource: sunximmc_resource_release(smc_host); probe_free_host: mmc_free_host(mmc); probe_out: return ret; }
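/*
 * One genuine bug worth flagging in sunximmc_probe() above:
 * memset((void*)smc_host, 0, sizeof(smc_host)) clears only sizeof(pointer)
 * bytes, because smc_host is a pointer; the intent was presumably
 * sizeof(*smc_host). In mainline kernels the private area handed back by
 * mmc_priv() is already zeroed (mmc_alloc_host allocates with kzalloc), so
 * the memset is likely redundant anyway. A tiny standalone illustration of
 * the pitfall:
 */
#include <string.h>

struct demo_host { char regs[64]; };

/* a pointer is 4 or 8 bytes; the struct behind it is the full 64 */
_Static_assert(sizeof(struct demo_host *) < sizeof(struct demo_host),
	       "sizeof(ptr) is not sizeof(*ptr)");

static void clear_demo_host(struct demo_host *h)
{
	memset(h, 0, sizeof(*h));	/* the form presumably intended above */
}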
/* * Probe for the device */ static int __init at91_mci_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct at91mci_host *host; struct resource *res; int ret; pr_debug("Probe MCI devices\n"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME)) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev); if (!mmc) { pr_debug("Failed to allocate mmc host\n"); release_mem_region(res->start, res->end - res->start + 1); return -ENOMEM; } mmc->ops = &at91_mci_ops; mmc->f_min = 375000; mmc->f_max = 25000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->max_blk_size = 4095; mmc->max_blk_count = mmc->max_req_size; host = mmc_priv(mmc); host->mmc = mmc; host->buffer = NULL; host->bus_mode = 0; host->board = pdev->dev.platform_data; if (host->board->wire4) { if (cpu_is_at91sam9260() || cpu_is_at91sam9263()) mmc->caps |= MMC_CAP_4_BIT_DATA; else printk("AT91 MMC: 4 wire bus mode not supported" " - using 1 wire\n"); } /* * Get Clock */ host->mci_clk = clk_get(&pdev->dev, "mci_clk"); if (IS_ERR(host->mci_clk)) { printk(KERN_ERR "AT91 MMC: no clock defined.\n"); mmc_free_host(mmc); release_mem_region(res->start, res->end - res->start + 1); return -ENODEV; } /* * Map I/O region */ host->baseaddr = ioremap(res->start, res->end - res->start + 1); if (!host->baseaddr) { clk_put(host->mci_clk); mmc_free_host(mmc); release_mem_region(res->start, res->end - res->start + 1); return -ENOMEM; } /* * Reset hardware */ clk_enable(host->mci_clk); /* Enable the peripheral clock */ at91_mci_disable(host); at91_mci_enable(host); /* * Allocate the MCI interrupt */ host->irq = platform_get_irq(pdev, 0); ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host); if (ret) { printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n"); clk_disable(host->mci_clk); clk_put(host->mci_clk); mmc_free_host(mmc); iounmap(host->baseaddr); release_mem_region(res->start, res->end - res->start + 1); return ret; } platform_set_drvdata(pdev, mmc); /* * Add host to MMC layer */ if (host->board->det_pin) { host->present = !at91_get_gpio_value(host->board->det_pin); device_init_wakeup(&pdev->dev, 1); } else host->present = -1; mmc_add_host(mmc); /* * monitor card insertion/removal if we can */ if (host->board->det_pin) { ret = request_irq(host->board->det_pin, at91_mmc_det_irq, 0, DRIVER_NAME, host); if (ret) printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n"); } pr_debug("Added MCI driver\n"); return 0; }
static int __init at91_mci_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct at91mci_host *host; struct resource *res; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) return -EBUSY; mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; dev_dbg(&pdev->dev, "couldn't allocate mmc host\n"); goto fail6; } mmc->ops = &at91_mci_ops; mmc->f_min = 375000; mmc->f_max = 25000000; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->caps = 0; mmc->max_blk_size = MCI_MAXBLKSIZE; mmc->max_blk_count = MCI_BLKATONCE; mmc->max_req_size = MCI_BUFSIZE; mmc->max_segs = MCI_BLKATONCE; mmc->max_seg_size = MCI_BUFSIZE; host = mmc_priv(mmc); host->mmc = mmc; host->bus_mode = 0; host->board = pdev->dev.platform_data; if (host->board->wire4) { if (at91mci_is_mci1rev2xx()) mmc->caps |= MMC_CAP_4_BIT_DATA; else dev_warn(&pdev->dev, "4 wire bus mode not supported" " - using 1 wire\n"); } host->buffer = dma_alloc_coherent(&pdev->dev, MCI_BUFSIZE, &host->physical_address, GFP_KERNEL); if (!host->buffer) { ret = -ENOMEM; dev_err(&pdev->dev, "Can't allocate transmit buffer\n"); goto fail5; } if (at91mci_is_mci1rev2xx()) { if (host->board->wire4 || !host->board->slot_b) mmc->caps |= MMC_CAP_SDIO_IRQ; } if (gpio_is_valid(host->board->det_pin)) { ret = gpio_request(host->board->det_pin, "mmc_detect"); if (ret < 0) { dev_dbg(&pdev->dev, "couldn't claim card detect pin\n"); goto fail4b; } } if (gpio_is_valid(host->board->wp_pin)) { ret = gpio_request(host->board->wp_pin, "mmc_wp"); if (ret < 0) { dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n"); goto fail4; } } if (gpio_is_valid(host->board->vcc_pin)) { ret = gpio_request(host->board->vcc_pin, "mmc_vcc"); if (ret < 0) { dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n"); goto fail3; } } host->mci_clk = clk_get(&pdev->dev, "mci_clk"); if (IS_ERR(host->mci_clk)) { ret = -ENODEV; dev_dbg(&pdev->dev, "no mci_clk?\n"); goto fail2; } host->baseaddr = ioremap(res->start, resource_size(res)); if (!host->baseaddr) { ret = -ENOMEM; goto fail1; } clk_enable(host->mci_clk); at91_mci_disable(host); at91_mci_enable(host); host->irq = platform_get_irq(pdev, 0); ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, mmc_hostname(mmc), host); if (ret) { dev_dbg(&pdev->dev, "request MCI interrupt failed\n"); goto fail0; } setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host); platform_set_drvdata(pdev, mmc); if (gpio_is_valid(host->board->det_pin)) { host->present = !gpio_get_value(host->board->det_pin); } else host->present = -1; mmc_add_host(mmc); if (gpio_is_valid(host->board->det_pin)) { ret = request_irq(gpio_to_irq(host->board->det_pin), at91_mmc_det_irq, 0, mmc_hostname(mmc), host); if (ret) dev_warn(&pdev->dev, "request MMC detect irq failed\n"); else device_init_wakeup(&pdev->dev, 1); } pr_debug("Added MCI driver\n"); return 0; fail0: clk_disable(host->mci_clk); iounmap(host->baseaddr); fail1: clk_put(host->mci_clk); fail2: if (gpio_is_valid(host->board->vcc_pin)) gpio_free(host->board->vcc_pin); fail3: if (gpio_is_valid(host->board->wp_pin)) gpio_free(host->board->wp_pin); fail4: if (gpio_is_valid(host->board->det_pin)) gpio_free(host->board->det_pin); fail4b: if (host->buffer) dma_free_coherent(&pdev->dev, MCI_BUFSIZE, host->buffer, host->physical_address); fail5: mmc_free_host(mmc); fail6: release_mem_region(res->start, resource_size(res)); dev_err(&pdev->dev, "probe failed, err %d\n", ret); return 
ret; }
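/*
 * Taken together, every probe routine in this file follows the same skeleton
 * around the MMC core: mmc_alloc_host() plus mmc_priv() for the driver's
 * private state, then ops, f_min/f_max, ocr_avail and the max_* transfer
 * limits, then registers, clocks and IRQs, and finally mmc_add_host(), with
 * the error path unwinding in reverse order down to mmc_free_host(). A
 * minimal, hedged skeleton of that shape; the foo_* names are hypothetical,
 * only the mmc_* and platform_* calls are the real core API:
 */
struct foo_host {
	struct mmc_host	*mmc;
	void __iomem	*base;
};

static const struct mmc_host_ops foo_mmc_ops;	/* request/set_ios handlers elsewhere */

static int foo_mmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct foo_host *host;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct foo_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	/* ... ioremap registers, clk_get()/clk_enable(), request_irq() ... */

	mmc->ops = &foo_mmc_ops;
	mmc->f_min = 400000;				/* 400 kHz identification rate */
	mmc->f_max = 52000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = 0xffff;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_free_host;

	return 0;

err_free_host:
	/* ... free IRQ, disable/put clocks, iounmap, in reverse order ... */
	mmc_free_host(mmc);
	return ret;
}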