static int __devexit sdhci_pxav3_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_pxa *pxa = pltfm_host->priv;
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;

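	/* Unregister from the SDHCI core before releasing clocks and PM state. */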
	sdhci_remove_host(host, 1);
	if (pdata && pdata->flags & PXA_FLAG_EN_PM_RUNTIME)
		pm_runtime_disable(&pdev->dev);

	if (pdata)
		pm_qos_remove_request(&pdata->qos_idle);

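	/* Release the I/O clock acquired in probe. */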
	clk_disable_unprepare(pltfm_host->clk);
	clk_put(pltfm_host->clk);

	if (pdata && pdata->cd_type == PXA_SDHCI_CD_GPIO &&
			gpio_is_valid(pdata->ext_cd_gpio))
		mmc_gpio_free_cd(host->mmc);

	sdhci_pltfm_free(pdev);
	kfree(pxa);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
Example #2
static int sdhci_sirf_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_sirf_priv *priv = sdhci_pltfm_priv(pltfm_host);

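	/* Unregister the platform SDHCI host before freeing the CD GPIO and clock. */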
	sdhci_pltfm_unregister(pdev);

	if (gpio_is_valid(priv->gpio_cd))
		mmc_gpio_free_cd(host->mmc);

	clk_disable_unprepare(priv->clk);
	return 0;
}
Example #3
static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
	int irq[2];

	host->dying = true;
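	/* Keep the interface clocked and powered while it is being torn down. */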
	clk_enable(host->hclk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	if (pd && pd->use_cd_gpio)
		mmc_gpio_free_cd(host->mmc);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't help either
	 * (a query on the linux-mmc mailing list didn't bring any replies).
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	if (host->addr)
		iounmap(host->addr);

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);

	free_irq(irq[0], host);
	free_irq(irq[1], host);

	platform_set_drvdata(pdev, NULL);

	clk_disable(host->hclk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
Example #4
static int __devexit sdhci_pxav3_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_pxa *pxa = pltfm_host->priv;
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;

	sdhci_remove_host(host, 1);

	clk_disable_unprepare(pltfm_host->clk);
	clk_put(pltfm_host->clk);

	if (pdata && gpio_is_valid(pdata->ext_cd_gpio))
		mmc_gpio_free_cd(host->mmc);

	sdhci_pltfm_free(pdev);
	kfree(pxa);

	platform_set_drvdata(pdev, NULL);

	return 0;
}
Example #5
static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct sdhci_host *host = NULL;
	struct sdhci_pxa *pxa = NULL;
	const struct of_device_id *match;

	int ret;
	struct clk *clk;

	pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
	if (!pxa)
		return -ENOMEM;

	host = sdhci_pltfm_init(pdev, NULL);
	if (IS_ERR(host)) {
		kfree(pxa);
		return PTR_ERR(host);
	}
	pltfm_host = sdhci_priv(host);
	pltfm_host->priv = pxa;

	clk = clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(clk);
		goto err_clk_get;
	}
	pltfm_host->clk = clk;
	clk_prepare_enable(clk);

	host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
		| SDHCI_QUIRK_32BIT_ADMA_SIZE
		| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;

	/* enable 1.8V DDR capability */
	host->mmc->caps |= MMC_CAP_1_8V_DDR;

	match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
	if (match)
		pdata = pxav3_get_mmc_pdata(dev);

	if (pdata) {
		if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
			/* on-chip device */
			host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
			host->mmc->caps |= MMC_CAP_NONREMOVABLE;
		}

		/* If slot design supports 8 bit data, indicate this to MMC. */
		if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
			host->mmc->caps |= MMC_CAP_8_BIT_DATA;

		if (pdata->quirks)
			host->quirks |= pdata->quirks;
		if (pdata->quirks2)
			host->quirks2 |= pdata->quirks2;
		if (pdata->host_caps)
			host->mmc->caps |= pdata->host_caps;
		if (pdata->host_caps2)
			host->mmc->caps2 |= pdata->host_caps2;
		if (pdata->pm_caps)
			host->mmc->pm_caps |= pdata->pm_caps;

		if (gpio_is_valid(pdata->ext_cd_gpio)) {
			ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio);
			if (ret) {
				dev_err(mmc_dev(host->mmc),
					"failed to allocate card detect gpio\n");
				goto err_cd_req;
			}
		}
	}

	host->ops = &pxav3_sdhci_ops;

	sdhci_get_of_property(pdev);

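	/* Register the host with the SDHCI core; the MMC stack takes over from here. */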
	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to add host\n");
		goto err_add_host;
	}

	platform_set_drvdata(pdev, host);

	return 0;

err_add_host:
	mmc_gpio_free_cd(host->mmc);
err_cd_req:
	clk_disable_unprepare(clk);
	clk_put(clk);
err_clk_get:
	sdhci_pltfm_free(pdev);
	kfree(pxa);
	return ret;
}
Example #6
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
	struct resource *res;
	void __iomem *reg;
	char clk_name[8];

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0 || irq[1] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		return -ENXIO;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		dev_err(&pdev->dev, "ioremap error.\n");
		return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto ealloch;
	}
	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->addr	= reg;
	host->timeout	= 1000;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
	host->hclk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(host->hclk)) {
		ret = PTR_ERR(host->hclk);
		dev_err(&pdev->dev, "cannot get clock \"%s\": %d\n", clk_name, ret);
		goto eclkget;
	}
	ret = sh_mmcif_clk_update(host);
	if (ret < 0)
		goto eclkupdate;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto eresume;

	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

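	/* The controller uses separate interrupt lines for errors and normal events. */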
	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
		goto ereqirq0;
	}
	ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
		goto ereqirq1;
	}

	if (pd && pd->use_cd_gpio) {
		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio);
		if (ret < 0)
			goto erqcd;
	}

	clk_disable(host->hclk);
	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto emmcaddh;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
	return ret;

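/* Error unwinding: falling through the labels releases resources in reverse order of acquisition. */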
emmcaddh:
	if (pd && pd->use_cd_gpio)
		mmc_gpio_free_cd(mmc);
erqcd:
	free_irq(irq[1], host);
ereqirq1:
	free_irq(irq[0], host);
ereqirq0:
	pm_runtime_suspend(&pdev->dev);
eresume:
	clk_disable(host->hclk);
eclkupdate:
	clk_put(host->hclk);
eclkget:
	pm_runtime_disable(&pdev->dev);
	mmc_free_host(mmc);
ealloch:
	iounmap(reg);
	return ret;
}
static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct sdhci_host *host = NULL;
	struct sdhci_pxa *pxa = NULL;
	const struct of_device_id *match;

	int ret;
	struct clk *clk;

	int qos_class = PM_QOS_CPUIDLE_BLOCK;

	pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
	if (!pxa)
		return -ENOMEM;

	host = sdhci_pltfm_init(pdev, NULL);
	if (IS_ERR(host)) {
		kfree(pxa);
		return PTR_ERR(host);
	}
	pltfm_host = sdhci_priv(host);
	pltfm_host->priv = pxa;

	clk = clk_get(dev, "PXA-SDHCLK");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(clk);
		goto err_clk_get;
	}
	pltfm_host->clk = clk;
	clk_prepare_enable(clk);

	host->quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
		| SDHCI_QUIRK_32BIT_ADMA_SIZE
		| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;

	match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
	if (match)
		pdata = pxav3_get_mmc_pdata(dev);

	if (pdata) {
		pdata->qos_idle.name = mmc_hostname(host->mmc);
		pm_qos_add_request(&pdata->qos_idle, qos_class,
			PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);

		/* If slot design supports 8 bit data, indicate this to MMC. */
		if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
			host->mmc->caps |= MMC_CAP_8_BIT_DATA;

		if (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING)
			host->mmc->caps2 |= MMC_CAP2_BUS_AUTO_CLK_GATE;

		if (pdata->flags & PXA_FLAG_DISABLE_PROBE_CDSCAN)
			host->mmc->caps2 |= MMC_CAP2_DISABLE_PROBE_CDSCAN;

		if (pdata->quirks)
			host->quirks |= pdata->quirks;
		if (pdata->host_caps)
			host->mmc->caps |= pdata->host_caps;
		if (pdata->host_caps2)
			host->mmc->caps2 |= pdata->host_caps2;
		if (pdata->pm_caps)
			host->mmc->pm_caps |= pdata->pm_caps;

		if (pdata->cd_type != PXA_SDHCI_CD_HOST)
			host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;

		if (pdata->cd_type == PXA_SDHCI_CD_GPIO &&
			gpio_is_valid(pdata->ext_cd_gpio)) {
			ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio);
			if (ret) {
				dev_err(mmc_dev(host->mmc),
					"failed to allocate card detect gpio\n");
				goto err_cd_req;
			}
		} else if (pdata->cd_type == PXA_SDHCI_CD_PERMANENT) {
			/* on-chip device */
			host->mmc->caps |= MMC_CAP_NONREMOVABLE;
		} else if (pdata->cd_type == PXA_SDHCI_CD_NONE)
			host->mmc->caps |= MMC_CAP_NEEDS_POLL;
	}
	host->quirks2 = SDHCI_QUIRK2_NO_CURRENT_LIMIT
		| SDHCI_QUIRK2_PRESET_VALUE_BROKEN
		| SDHCI_QUIRK2_TIMEOUT_DIVIDE_4;
	host->ops = &pxav3_sdhci_ops;

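	/* Optionally enable runtime PM with autosuspend before the host is registered. */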
	if (pdata && pdata->flags & PXA_FLAG_EN_PM_RUNTIME) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
		pm_runtime_set_autosuspend_delay(&pdev->dev,
				PXAV3_RPM_DELAY_MS);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_suspend_ignore_children(&pdev->dev, 1);
	}

#ifdef _MMC_SAFE_ACCESS_
	mmc_is_available = 1;
#endif

	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to add host\n");
		if (pdata && pdata->flags & PXA_FLAG_EN_PM_RUNTIME) {
			pm_runtime_forbid(&pdev->dev);
			pm_runtime_get_noresume(&pdev->dev);
		}
		goto err_add_host;
	}

	/*
	 * Remove caps that the controller supports but that are not
	 * available on this platform.
	 */
	if (pdata && pdata->host_caps_disable)
		host->mmc->caps &= ~(pdata->host_caps_disable);

	platform_set_drvdata(pdev, host);

	if (pdata && pdata->flags & PXA_FLAG_WAKEUP_HOST) {
		device_init_wakeup(&pdev->dev, 1);
		host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
	} else {
		device_init_wakeup(&pdev->dev, 0);
	}

#ifdef CONFIG_SD8XXX_RFKILL
	if (pdata && pdata->pmmc)
		*pdata->pmmc = host->mmc;
#endif

	return 0;

err_add_host:
	mmc_gpio_free_cd(host->mmc);
err_cd_req:
	clk_disable_unprepare(clk);
	clk_put(clk);
	if (pdata)
		pm_qos_remove_request(&pdata->qos_idle);
err_clk_get:
	sdhci_pltfm_free(pdev);
	kfree(pxa);
	return ret;
}