Example #1
static int dw_mci_rockchip_probe(struct platform_device *pdev)
{
	const struct dw_mci_drv_data *drv_data;
	const struct of_device_id *match;
	int ret;

	if (!pdev->dev.of_node)
		return -ENODEV;

	match = of_match_node(dw_mci_rockchip_match, pdev->dev.of_node);
	drv_data = match->data;

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);

	ret = dw_mci_pltfm_register(pdev, drv_data);
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		pm_runtime_set_suspended(&pdev->dev);
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;
}
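Example #1 takes a runtime-PM usage reference with pm_runtime_get_noresume() and only drops it with pm_runtime_put_autosuspend() once registration succeeds, so the driver's teardown path has to re-acquire and release a reference symmetrically. The sketch below shows what a matching remove() callback might look like; it is a hedged sketch, not part of the example, and it assumes dw_mci_pltfm_remove() is the unregister helper paired with dw_mci_pltfm_register():

static int dw_mci_rockchip_remove(struct platform_device *pdev)
{
	/* Hypothetical counterpart sketch, not taken from the example above. */
	pm_runtime_get_sync(&pdev->dev);	/* resume and pin the device */
	pm_runtime_disable(&pdev->dev);		/* undo pm_runtime_enable() */
	pm_runtime_put_noidle(&pdev->dev);	/* balance the pm_runtime_get_sync() above */

	return dw_mci_pltfm_remove(pdev);
}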
Example #2
/*
***************************************************************
 @Function	  :sunxi_mali_platform_device_register

 @Description :Register mali platform device

 @Input		  :None

 @Return	  :0 or error code
***************************************************************
*/
int sunxi_mali_platform_device_register(void)
{
    int err;
    unsigned long mem_size = 0;
    struct __fb_addr_para fb_addr_para={0};

    sunxi_get_fb_addr_para(&fb_addr_para);

    err = platform_device_add_resources(&mali_gpu_device, mali_gpu_resources, sizeof(mali_gpu_resources) / sizeof(mali_gpu_resources[0]));
    if (0 == err) {
        mali_gpu_data.fb_start = fb_addr_para.fb_paddr;
        mali_gpu_data.fb_size = fb_addr_para.fb_size;
        mem_size = (totalram_pages * PAGE_SIZE) / 1024; /* KB */

        if (mem_size > 512*1024)
            mali_gpu_data.shared_mem_size = 1024*1024*1024;
        else
            mali_gpu_data.shared_mem_size = 512*1024*1024;

        err = platform_device_add_data(&mali_gpu_device, &mali_gpu_data, sizeof(mali_gpu_data));
        if (0 == err) {
            err = platform_device_register(&mali_gpu_device);
            if (0 == err) {
                if (_MALI_OSK_ERR_OK != mali_platform_init())
                    return _MALI_OSK_ERR_FAULT;
#ifdef CONFIG_PM_RUNTIME
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
                pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
                pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
#endif
                pm_runtime_enable(&(mali_gpu_device.dev));
#endif /* CONFIG_PM_RUNTIME */
                /* print mali gpu information */
                printk(KERN_INFO "=========================================================\n");
                printk(KERN_INFO "       Mali GPU Information         \n");
                printk(KERN_INFO "voltage             : %d mV\n", regulator_get_voltage(mali_regulator)/1000);
                printk(KERN_INFO "initial frequency   : %ld MHz\n", clk_get_rate(h_mali_clk)/(1000*1000));
                printk(KERN_INFO "frame buffer address: 0x%lx - 0x%lx\n", mali_gpu_data.fb_start, mali_gpu_data.fb_start + mali_gpu_data.shared_mem_size);
                printk(KERN_INFO "frame buffer size   : %ld MB\n", mali_gpu_data.shared_mem_size/(1024*1024));
                printk(KERN_INFO "=========================================================\n");
                return 0;
            }
        }

        platform_device_unregister(&mali_gpu_device);
    }

#ifdef CONFIG_CPU_BUDGET_THERMAL
	register_budget_cooling_notifier(&mali_throttle_notifier);
#endif /* CONFIG_CPU_BUDGET_THERMAL */
	
    return err;
}
Example #3
static int __devinit gr2d_probe(struct platform_device *dev)
{
	int err = 0;
	struct nvhost_device_data *pdata = NULL;

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_device(tegra_gr2d_of_match, &dev->dev);
		if (match)
			pdata = (struct nvhost_device_data *)match->data;
	} else
		pdata = (struct nvhost_device_data *)dev->dev.platform_data;

	WARN_ON(!pdata);
	if (!pdata) {
		dev_info(&dev->dev, "no platform data\n");
		return -ENODATA;
	}
	pdata->pdev = dev;
	platform_set_drvdata(dev, pdata);

	err = nvhost_client_device_init(dev);
	if (err)
		return err;

	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_set_autosuspend_delay(&dev->dev, 100);
	pm_runtime_enable(&dev->dev);

	return 0;
}
static int __devinit msenc_probe(struct platform_device *dev)
{
	int err = 0;
	struct nvhost_device_data *pdata =
		(struct nvhost_device_data *)dev->dev.platform_data;

	pdata->pdev = dev;
	pdata->init = nvhost_msenc_init;
	pdata->deinit = nvhost_msenc_deinit;
	pdata->finalize_poweron = nvhost_msenc_finalize_poweron;

	platform_set_drvdata(dev, pdata);

	err = nvhost_client_device_get_resources(dev);
	if (err)
		return err;

	err = nvhost_client_device_init(dev);
	if (err)
		return err;

	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_set_autosuspend_delay(&dev->dev, 100);
	pm_runtime_enable(&dev->dev);

	return 0;
}
int mali_platform_device_register(void)
{
	int err;

	MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));

	/* Connect resources to the device */
	err = platform_device_add_resources(&exynos4_device_g3d, mali_gpu_resources, sizeof(mali_gpu_resources) / sizeof(mali_gpu_resources[0]));
	if (0 == err)
	{
		err = platform_device_add_data(&exynos4_device_g3d, &mali_gpu_data, sizeof(mali_gpu_data));
		if (0 == err)
		{
			mali_platform_init(&(exynos4_device_g3d.dev));
#ifdef CONFIG_PM_RUNTIME
			pm_runtime_set_autosuspend_delay(&(exynos4_device_g3d.dev), 50);
			pm_runtime_use_autosuspend(&(exynos4_device_g3d.dev));
			pm_runtime_enable(&(exynos4_device_g3d.dev));
#endif
			return 0;
		}

	}
	return err;
}
Example #6
/**
 * blk_pm_runtime_init - Block layer runtime PM initialization routine
 * @q: the queue of the device
 * @dev: the device the queue belongs to
 *
 * Description:
 *    Initialize runtime-PM-related fields for @q and start autosuspend for
 *    @dev. Drivers that want to take advantage of request-based runtime PM
 *    should call this function after @dev has been initialized and its
 *    request queue @q has been allocated, but while runtime PM cannot happen
 *    yet (either because it is disabled/forbidden or because usage_count > 0).
 *    In most cases, drivers should call this function before any I/O has
 *    taken place.
 *
 *    This function takes care of setting up autosuspend for the device; the
 *    autosuspend delay is set to -1 to make runtime suspend impossible until
 *    an updated value is set either by the user or by the driver. Drivers do
 *    not need to touch other autosuspend settings.
 *
 *    Block layer runtime PM is request based, so it only works for drivers
 *    that use requests as their I/O unit, not for those that use bios
 *    directly.
 */
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
	q->dev = dev;
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_set_autosuspend_delay(q->dev, -1);
	pm_runtime_use_autosuspend(q->dev);
}
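As the kernel-doc above points out, blk_pm_runtime_init() deliberately leaves the autosuspend delay at -1, so runtime suspend stays impossible until either user space or the driver picks a real value. A minimal usage sketch follows; the function name and the 5-second delay are illustrative assumptions, while blk_pm_runtime_init(), pm_runtime_set_autosuspend_delay() and pm_runtime_allow() are existing kernel APIs:

/* Hypothetical driver helper: @q must already be allocated for @dev and
 * no I/O should have taken place yet, as required by the kernel-doc above. */
static void mydrv_setup_block_rpm(struct request_queue *q, struct device *dev)
{
	blk_pm_runtime_init(q, dev);			/* rpm_status = RPM_ACTIVE, delay = -1 */
	pm_runtime_set_autosuspend_delay(dev, 5000);	/* driver-chosen 5 s delay */
	pm_runtime_allow(dev);				/* in case runtime PM was forbidden by default */
}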
Example #7
/*****************************************************************************
 function name  : mali_platform_device_register
 description    : mali platform device register
 input vars     : void
 output vars    : NA
 return value   : void
 calls          : mali_platform_init

 called         : os

 history        :
  1.data        : 18/10/2012
    modify      : new

*****************************************************************************/
int mali_platform_device_register(void)
{
    int err = -1;

    MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));
    MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));

    /* initialize the hisi memory first */
    mali_hisi_mem_init();
    mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp4);
    mali_gpu_device.resource = mali_gpu_resources_m450_mp4;

    /* Register the platform device */
    err = platform_device_register(&mali_gpu_device);
    if (0 == err)
    {
        mali_platform_init();
#ifdef CONFIG_PM_RUNTIME
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
        pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
        pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
#endif
        pm_runtime_enable(&(mali_gpu_device.dev));
#endif

        return 0;
    }

    return err;
}
Example #8
int mali_platform_device_register(void)
{
	int err;

	MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));

	/* Connect resources to the device */
	err = platform_device_add_resources(&exynos4_device_g3d,
						mali_gpu_resources,
						sizeof(mali_gpu_resources) /
						sizeof(mali_gpu_resources[0]));
	if (0 == err) {
		err = platform_device_add_data(&exynos4_device_g3d,
						&mali_gpu_data,
						sizeof(mali_gpu_data));
		if (0 == err) {
			mali_platform_init(&(exynos4_device_g3d.dev));

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
			pm_runtime_set_autosuspend_delay(&(exynos4_device_g3d.dev), 200);
			pm_runtime_use_autosuspend(&(exynos4_device_g3d.dev));
#endif
			pm_runtime_enable(&(exynos4_device_g3d.dev));
			return 0;
		}

	}
	return err;
}
int mali_platform_device_register(void)
{
    int err = -1;

    MALI_DEBUG_PRINT(1, ("%s\n", __FUNCTION__));

    err = platform_device_register(&mali_gpu_device);

    if (0 == err)
    {
        mali_pmm_init();

#ifdef CONFIG_PM_RUNTIME
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
        pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
        pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
#endif
        pm_runtime_enable(&(mali_gpu_device.dev));
#endif

#if defined(__MALI_CORE_SCALING_ENABLE__)
        mali_core_scaling_init(num_pp_cores);
#endif
        return 0;
    }

    MALI_DEBUG_PRINT(1, ("%s err=%d\n",__FUNCTION__, err));

    platform_device_unregister(&mali_gpu_device);

    return err;
}
void escore_pm_enable(void)
{
	struct escore_priv *escore = &escore_priv;
	int ret = 0;

	dev_dbg(escore->dev, "%s()\n", __func__);

	if (escore->pm_enable) {
		pr_err("%s(): Already Enabled\n", __func__);
		return;
	}

	escore->pm_enable = ES_PM_ON;
	escore->pm_status = ES_PM_ON;
	pm_runtime_set_active(escore->dev);
	pm_runtime_mark_last_busy(escore->dev);
	pm_runtime_set_autosuspend_delay(escore->dev, ES_PM_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(escore->dev);
	pm_runtime_enable(escore->dev);
	device_init_wakeup(escore->dev, true);
	if (pm_runtime_get_sync(escore->dev) >= 0) {
		ret = pm_runtime_put_sync_autosuspend(escore->dev);
		if (ret < 0) {
			dev_err(escore->dev,
				"%s() escore PM put failed ret = %d\n",
				__func__, ret);
		}
	} else
		dev_err(escore->dev,
			"%s() escore PM get failed ret = %d\n", __func__, ret);
	return;
}
Example #11
/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. To remove and
 * release the domain afterwards, call tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);

	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
	if (!tb->ctl) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
err_unlock:
	mutex_unlock(&tb->lock);

	return ret;
}
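The kernel-doc for tb_domain_add() notes that hotplug starts working once it returns successfully and that tb_domain_remove() is the counterpart that releases the domain. Below is a hedged caller sketch, assuming a struct tb obtained elsewhere (for example from the connection-manager setup); the function name is purely illustrative:

/* Hypothetical caller sketch; error handling beyond propagating the
 * return value (e.g. dropping the domain reference) is omitted. */
static int example_start_domain(struct tb *tb)
{
	int ret;

	ret = tb_domain_add(tb);
	if (ret)
		return ret;

	/* Hotplug events are processed from here on; the shutdown path
	 * pairs this call with tb_domain_remove(tb). */
	return 0;
}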
Example #12
static int omap3_thermal_probe(struct platform_device *pdev)
{
    struct thermal_zone_device *omap3_thermal = NULL;
    struct omap3_thermal_dev *tdev;
    int ret = 0;
    struct resource *stres = platform_get_resource(pdev, IORESOURCE_MEM, 0);

    if (!stres) {
        dev_err(&pdev->dev, "memory resource missing\n");
        return -ENODEV;
    }

    tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
    if (!tdev)
        return -ENOMEM;

    tdev->dev = &pdev->dev;

    if (cpu_is_omap3630()) {
        tdev->bgap_soc_mask = BIT(9);
        tdev->bgap_eocz_mask = BIT(8);
        tdev->adc_to_temp = omap3630_adc_to_temp;
    } else if (cpu_is_omap34xx()) {
        tdev->bgap_soc_mask = BIT(8);
        tdev->bgap_eocz_mask = BIT(7);
        tdev->adc_to_temp = omap3530_adc_to_temp;
    } else {
        dev_err(&pdev->dev, "not OMAP3 family\n");
        return -ENODEV;
    }

    tdev->thermal_base = devm_ioremap(&pdev->dev, stres->start,
                                      resource_size(stres));
    if (!tdev->thermal_base) {
        dev_err(&pdev->dev, "ioremap failed\n");
        return -ENOMEM;
    }

    pm_runtime_enable(&pdev->dev);
    pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
    pm_runtime_use_autosuspend(&pdev->dev);

    omap3_thermal = thermal_zone_device_register("omap3-thermal", 0,
                    tdev, &omap3_thermal_ops, 0, 0, 0, 0);
    if (!omap3_thermal) {
        dev_err(&pdev->dev, "thermal zone device is NULL\n");
        ret = -EINVAL;
        goto put_pm;
    }

    platform_set_drvdata(pdev, omap3_thermal);

    return 0;

put_pm:
    pm_runtime_disable(&pdev->dev);
    return ret;
}
Example #13
/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    ((flags & AMD_IS_APU) == 0))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal errors
	 * (memory allocation, iomapping or memory manager
	 * initialization failure); it must properly initialize
	 * the GPU MC controller and permit VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: they require modeset init,
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
		dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	amdgpu_amdkfd_load_interface(adev);
	amdgpu_amdkfd_device_probe(adev);
	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r)
		amdgpu_driver_unload_kms(dev);


	return r;
}
static int hi3630_pcm_hdmi_probe(struct platform_device *pdev)
{
	int ret = -1;
	struct device *dev = &pdev->dev;
	struct hi3630_hdmi_data *pdata = NULL;

	if (!dev) {
		loge("platform_device has no device\n");
		return -ENOENT;
	}

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata){
		dev_err(dev, "cannot allocate hi3630 hdmi platform data\n");
		return -ENOMEM;
	}

	pdata->hi3630_asp_irq = dev_get_drvdata(pdev->dev.parent);
	if (!pdata->hi3630_asp_irq) {
		dev_err(dev, "get parent device error\n");
		return -ENOENT;
	}

	pdata->irq = platform_get_irq_byname(pdev, "asp_irq_hdmi");
	if (0 > pdata->irq) {
		dev_err(dev, "cannot get irq\n");
		return -ENOENT;
	}

	pdata->regu.supply = "hdmi-pcm";
	ret = devm_regulator_bulk_get(dev, 1, &(pdata->regu));
	if (0 != ret) {
		dev_err(dev, "couldn't get regulators %d\n", ret);
		return -ENOENT;
	}

	pdata->dev = dev;

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_set_autosuspend_delay(dev, 100); /* 100ms*/
	pm_runtime_use_autosuspend(dev);

	pm_runtime_enable(dev);
#endif

	platform_set_drvdata(pdev, pdata);

	dev_set_name(dev, "hi3630-pcm-hdmi");

	ret = snd_soc_register_platform(dev, &hi3630_pcm_hdmi_platform);
	if (ret) {
		loge("snd_soc_register_platform return %d\n", ret);
		return -ENODEV;
	}

	return ret;
}
Example #15
File: pm.c Project: avagin/linux
void wil_pm_runtime_allow(struct wil6210_priv *wil)
{
	struct device *dev = wil_to_dev(wil);

	pm_runtime_put_noidle(dev);
	pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
}
Example #16
static int pa12203001_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	struct pa12203001_data *data;
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(&client->dev,
					  sizeof(struct pa12203001_data));
	if (!indio_dev)
		return -ENOMEM;

	data = iio_priv(indio_dev);
	i2c_set_clientdata(client, indio_dev);
	data->client = client;

	data->map = devm_regmap_init_i2c(client, &pa12203001_regmap_config);
	if (IS_ERR(data->map))
		return PTR_ERR(data->map);

	mutex_init(&data->lock);

	indio_dev->dev.parent = &client->dev;
	indio_dev->info = &pa12203001_info;
	indio_dev->name = PA12203001_DRIVER_NAME;
	indio_dev->channels = pa12203001_channels;
	indio_dev->num_channels = ARRAY_SIZE(pa12203001_channels);
	indio_dev->modes = INDIO_DIRECT_MODE;

	ret = pa12203001_init(indio_dev);
	if (ret < 0)
		return ret;

	ret = pa12203001_power_chip(indio_dev, PA12203001_CHIP_ENABLE);
	if (ret < 0)
		return ret;

	ret = pm_runtime_set_active(&client->dev);
	if (ret < 0)
		goto out_err;

	pm_runtime_enable(&client->dev);
	pm_runtime_set_autosuspend_delay(&client->dev,
					 PA12203001_SLEEP_DELAY_MS);
	pm_runtime_use_autosuspend(&client->dev);

	ret = iio_device_register(indio_dev);
	if (ret < 0)
		goto out_err;

	return 0;

out_err:
	pa12203001_power_chip(indio_dev, PA12203001_CHIP_DISABLE);
	return ret;
}
Example #17
int mali_platform_device_register(void)
{
    int err;

    MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
    exynos_pm_add_dev_to_genpd(&mali_gpu_device, &exynos4_pd_g3d);
#endif

    /* Connect resources to the device */
    err = platform_device_add_resources(&mali_gpu_device, mali_gpu_resources, sizeof(mali_gpu_resources) / sizeof(mali_gpu_resources[0]));
    if (0 == err)
    {
        err = platform_device_add_data(&mali_gpu_device, &mali_gpu_data, sizeof(mali_gpu_data));
        if (0 == err)
        {
#ifdef CONFIG_PM_RUNTIME
#if defined(USE_PM_NOTIFIER)
            err = register_pm_notifier(&mali_pwr_notif_block);
            if (err)
            {
                goto plat_init_err;
            }
#endif
#endif /* CONFIG_PM_RUNTIME */

            /* Register the platform device */
            err = platform_device_register(&mali_gpu_device);
            if (0 == err)
            {
                mali_platform_init(&(mali_gpu_device.dev));

#ifdef CONFIG_PM_RUNTIME
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
                pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
                pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
#endif
                pm_runtime_enable(&(mali_gpu_device.dev));
#endif

                return 0;
            }
        }

#ifdef CONFIG_PM_RUNTIME
#if defined(USE_PM_NOTIFIER)
plat_init_err:
        unregister_pm_notifier(&mali_pwr_notif_block);
#endif
#endif /* CONFIG_PM_RUNTIME */
        platform_device_unregister(&mali_gpu_device);
    }

    return err;
}
void mali_pdev_post_init(struct platform_device* pdev)
{
#ifdef CONFIG_PM_RUNTIME
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	pm_runtime_set_autosuspend_delay(&(pdev->dev), 1000);
	pm_runtime_use_autosuspend(&(pdev->dev));
#endif
	pm_runtime_enable(&(pdev->dev));
#endif
	mali_meson_init_finish(pdev);
}
static int mlx90614_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct iio_dev *indio_dev;
	struct mlx90614_data *data;
	int ret;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
		return -ENODEV;

	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;

	data = iio_priv(indio_dev);
	i2c_set_clientdata(client, indio_dev);
	data->client = client;
	mutex_init(&data->lock);
	data->wakeup_gpio = mlx90614_probe_wakeup(client);

	mlx90614_wakeup(data);

	indio_dev->dev.parent = &client->dev;
	indio_dev->name = id->name;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &mlx90614_info;

	ret = mlx90614_probe_num_ir_sensors(client);
	switch (ret) {
	case 0:
		dev_dbg(&client->dev, "Found single sensor");
		indio_dev->channels = mlx90614_channels;
		indio_dev->num_channels = 2;
		break;
	case 1:
		dev_dbg(&client->dev, "Found dual sensor");
		indio_dev->channels = mlx90614_channels;
		indio_dev->num_channels = 3;
		break;
	default:
		return ret;
	}

	if (data->wakeup_gpio) {
		pm_runtime_set_autosuspend_delay(&client->dev,
						 MLX90614_AUTOSLEEP_DELAY);
		pm_runtime_use_autosuspend(&client->dev);
		pm_runtime_set_active(&client->dev);
		pm_runtime_enable(&client->dev);
	}

	return iio_device_register(indio_dev);
}
static int kbase_rk_power_runtime_init_callback(
		struct kbase_device *kbdev)
{
	if (!kbdev->regulator)
		return -ENODEV;

	pm_runtime_set_autosuspend_delay(kbdev->dev, 200);
	pm_runtime_use_autosuspend(kbdev->dev);

	pm_runtime_set_active(kbdev->dev);
	pm_runtime_enable(kbdev->dev);

	return 0;
}
Example #21
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
				struct hid_sensor_common *attrb)
{
	int ret;
	struct iio_trigger *trig;

	trig = iio_trigger_alloc("%s-dev%d", name, indio_dev->id);
	if (trig == NULL) {
		dev_err(&indio_dev->dev, "Trigger Allocate Failed\n");
		ret = -ENOMEM;
		goto error_ret;
	}

	trig->dev.parent = indio_dev->dev.parent;
	iio_trigger_set_drvdata(trig, attrb);
	trig->ops = &hid_sensor_trigger_ops;
	ret = iio_trigger_register(trig);

	if (ret) {
		dev_err(&indio_dev->dev, "Trigger Register Failed\n");
		goto error_free_trig;
	}
	attrb->trigger = trig;
	indio_dev->trig = iio_trigger_get(trig);

	ret = pm_runtime_set_active(&indio_dev->dev);
	if (ret)
		goto error_unreg_trigger;

	iio_device_set_drvdata(indio_dev, attrb);
	pm_suspend_ignore_children(&attrb->pdev->dev, true);
	pm_runtime_enable(&attrb->pdev->dev);
	/* Default to 3 seconds, but can be changed from sysfs */
	pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
					 3000);
	pm_runtime_use_autosuspend(&attrb->pdev->dev);

	return ret;
error_unreg_trigger:
	iio_trigger_unregister(trig);
error_free_trig:
	iio_trigger_free(trig);
error_ret:
	return ret;
}
int mali_platform_device_register(void)
{
	int err = -1;

#	if MESON_CPU_TYPE == MESON_CPU_TYPE_MESON6
	static_pp_mmu_cnt = 1;
#	endif

	MALI_DEBUG_PRINT(4, ("mali_platform_device_register() called\n"));

	/* Detect present Mali GPU and connect the correct resources to the device */
	
	MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
	err = platform_device_add_resources(&mali_gpu_device, meson_mali_resources, sizeof(meson_mali_resources) / sizeof(meson_mali_resources[0]));

	if (0 == err)
	{
		err = platform_device_add_data(&mali_gpu_device, &mali_gpu_data, sizeof(mali_gpu_data));
		if (0 == err)
		{
			/* Register the platform device */
			err = platform_device_register(&mali_gpu_device);
			if (0 == err)
			{
				mali_platform_init();
#ifdef CONFIG_PM_RUNTIME
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
				pm_runtime_set_autosuspend_delay(&(mali_gpu_device.dev), 1000);
				pm_runtime_use_autosuspend(&(mali_gpu_device.dev));
#endif
				pm_runtime_enable(&(mali_gpu_device.dev));
#endif

				return 0;
			}
		}

		platform_device_unregister(&mali_gpu_device);
	}

	return err;
}
static void __devinit sdhci_pltfm_runtime_pm_init(struct device *device)
{
	struct sdio_dev *dev =
		platform_get_drvdata(to_platform_device(device));

	if (!sdhci_pltfm_rpm_enabled(dev))
		return;

	pm_runtime_irq_safe(device);
	pm_runtime_enable(device);

	if (dev->devtype == SDIO_DEV_TYPE_WIFI)
		pm_runtime_set_autosuspend_delay(device,
				KONA_MMC_WIFI_AUTOSUSPEND_DELAY);
	else
		pm_runtime_set_autosuspend_delay(device,
				KONA_MMC_AUTOSUSPEND_DELAY);

	pm_runtime_use_autosuspend(device);
}
/* RPM init */
int i915_rpm_init(struct drm_device *drm_dev)
{
	int ret = 0;
	struct device *dev = drm_dev->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	ret = i915_rpm_procfs_init(drm_dev);
	if (ret) {
		DRM_ERROR("unable to initialize procfs entry");
	}
	ret = pm_runtime_set_active(dev);
	dev_priv->rpm.ring_active = false;
	atomic_set(&dev_priv->rpm.procfs_count, 0);
	pm_runtime_allow(dev);
	/* enable Auto Suspend */
	pm_runtime_set_autosuspend_delay(dev, RPM_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	if (dev->power.runtime_error)
		DRM_ERROR("rpm init: error = %d\n", dev->power.runtime_error);

	return ret;
}
Example #25
static int exynos_rng_probe(struct platform_device *pdev)
{
	struct exynos_rng *exynos_rng;
	struct resource *res;
	int ret;

	exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
					GFP_KERNEL);
	if (!exynos_rng)
		return -ENOMEM;

	exynos_rng->dev = &pdev->dev;
	exynos_rng->rng.name = "exynos";
	exynos_rng->rng.init =	exynos_init;
	exynos_rng->rng.read = exynos_read;
	exynos_rng->clk = devm_clk_get(&pdev->dev, "secss");
	if (IS_ERR(exynos_rng->clk)) {
		dev_err(&pdev->dev, "Couldn't get clock.\n");
		return -ENOENT;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	exynos_rng->mem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(exynos_rng->mem))
		return PTR_ERR(exynos_rng->mem);

	platform_set_drvdata(pdev, exynos_rng);

	pm_runtime_set_autosuspend_delay(&pdev->dev, EXYNOS_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
	if (ret) {
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
	}

	return ret;
}
Example #26
static int probe(struct platform_device *pdev)
{
	int ret;

	dev_err(&pdev->dev, "%s\n", __func__);

	ret = sysfs_create_group(&pdev->dev.kobj, &attr_group);
	if (ret)
		return ret;

	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 5000);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_resume(&pdev->dev);
	dev_err(&pdev->dev, "pm_runtime_resume() returned %d\n", ret);

	dev_err(&pdev->dev, "%s() returns %d\n", __func__, ret);

	return ret;
}
Example #27
static enum MHI_STATUS process_sbl_transition(
				struct mhi_device_ctxt *mhi_dev_ctxt,
				enum STATE_TRANSITION cur_work_item)
{
	int r;
	mhi_log(MHI_MSG_INFO, "Processing SBL state transition\n");

	pm_runtime_set_autosuspend_delay(&mhi_dev_ctxt->dev_info->plat_dev->dev,
					 MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
	pm_runtime_use_autosuspend(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	r = pm_runtime_set_active(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	if (r) {
		mhi_log(MHI_MSG_ERROR,
		"Failed to activate runtime pm ret %d\n", r);
	}
	pm_runtime_enable(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	mhi_log(MHI_MSG_INFO, "Enabled runtime pm\n");
	mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
	wmb();
	enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
	return MHI_STATUS_SUCCESS;
}
static int serial_omap_probe(struct platform_device *pdev)
{
	struct uart_omap_port	*up;
	struct resource		*mem, *irq, *dma_tx, *dma_rx;
	struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
	int ret = -ENOSPC;

	if (pdev->dev.of_node)
		omap_up_info = of_get_uart_port_info(&pdev->dev);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
				pdev->dev.driver->name)) {
		dev_err(&pdev->dev, "memory region already claimed\n");
		return -EBUSY;
	}

	dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (!dma_rx)
		return -ENXIO;

	dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (!dma_tx)
		return -ENXIO;

	up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
	if (!up)
		return -ENOMEM;

	up->pdev = pdev;
	up->port.dev = &pdev->dev;
	up->port.type = PORT_OMAP;
	up->port.iotype = UPIO_MEM;
	up->port.irq = irq->start;

	up->port.regshift = 2;
	up->port.fifosize = 64;
	up->port.ops = &serial_omap_pops;

	if (pdev->dev.of_node)
		up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
	else
		up->port.line = pdev->id;

	if (up->port.line < 0) {
		dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
								up->port.line);
		ret = -ENODEV;
		goto err_port_line;
	}

	sprintf(up->name, "OMAP UART%d", up->port.line);
	up->port.mapbase = mem->start;
	up->port.membase = devm_ioremap(&pdev->dev, mem->start,
						resource_size(mem));
	if (!up->port.membase) {
		dev_err(&pdev->dev, "can't ioremap UART\n");
		ret = -ENOMEM;
		goto err_ioremap;
	}

	up->port.flags = omap_up_info->flags;
	up->port.uartclk = omap_up_info->uartclk;
	if (!up->port.uartclk) {
		up->port.uartclk = DEFAULT_CLK_SPEED;
		dev_warn(&pdev->dev, "No clock speed specified: using default:"
						"%d\n", DEFAULT_CLK_SPEED);
	}
	up->uart_dma.uart_base = mem->start;
	up->errata = omap_up_info->errata;

	if (omap_up_info->dma_enabled) {
		up->uart_dma.uart_dma_tx = dma_tx->start;
		up->uart_dma.uart_dma_rx = dma_rx->start;
		up->use_dma = 1;
		up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size;
		up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout;
		up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate;
		spin_lock_init(&(up->uart_dma.tx_lock));
		spin_lock_init(&(up->uart_dma.rx_lock));
		up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
		up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
	}

	up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	pm_qos_add_request(&up->pm_qos_request,
		PM_QOS_CPU_DMA_LATENCY, up->latency);
	serial_omap_uart_wq = create_singlethread_workqueue(up->name);
	INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
			omap_up_info->autosuspend_timeout);

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ui[up->port.line] = up;
	serial_omap_add_console_port(up);

	ret = uart_add_one_port(&serial_omap_reg, &up->port);
	if (ret != 0)
		goto err_add_port;

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, up);
	return 0;

err_add_port:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_ioremap:
err_port_line:
	dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
				pdev->id, __func__, ret);
	return ret;
}
Example #29
/*------------------------------------------------------------------------------
 * register cpu platform devices
 */
void __init nxp_cpu_devices_register(void)
{
    int i = 0;
    printk("[Register machine platform devices]\n");
#if defined(CONFIG_ARM_AMBA)
    for (i = 0; i < ARRAY_SIZE(amba_devices); i++) {
        struct amba_device *d = amba_devices[i];
        printk("mach: add amba device %s \n", d->dev.init_name);
        amba_device_register(d, &iomem_resource);
    }
#endif
    /* default uart hw prepare */
#if defined(CONFIG_SERIAL_NXP_UART0)
    NX_UART_CH_INIT(0);
#endif
#if defined(CONFIG_SERIAL_NXP_UART1)
    NX_UART_CH_INIT(1);
#endif
#if defined(CONFIG_SERIAL_NXP_UART2)
    NX_UART_CH_INIT(2);
#endif
#if defined(CONFIG_SERIAL_NXP_UART3)
    NX_UART_CH_INIT(3);
#endif
#if defined(CONFIG_SERIAL_NXP_UART4)
    NX_UART_CH_INIT(4);
#endif
#if defined(CONFIG_SERIAL_NXP_UART5)
    NX_UART_CH_INIT(5);
#endif

#if defined(CONFIG_NXP_DISPLAY)
    printk("mach: add device syncgen [%d]\n", ARRAY_SIZE(syncgen_devices));
    platform_add_devices(syncgen_devices, ARRAY_SIZE(syncgen_devices));
#endif

#if defined(CONFIG_NXP_DISPLAY_LCD)
    printk("mach: add device lcd \n");
    platform_device_register(&lcd_device);
#endif

#if defined(CONFIG_NXP_DISPLAY_LVDS)
    printk("mach: add device lvds \n");
    platform_device_register(&lvds_device);
#endif

#if defined(CONFIG_NXP_DISPLAY_MIPI)
    printk("mach: add device mipi \n");
    platform_device_register(&mipi_device);
#endif

#if defined(CONFIG_NXP_DISPLAY_HDMI)
    printk("mach: add device hdmi \n");
    platform_device_register(&hdmi_device);
#endif

#if defined(CONFIG_NXP_DISPLAY_RESC)
    printk("mach: add device resolution convertor \n");
    platform_device_register(&resc_device);
#endif

#if defined(CONFIG_SERIAL_NXP)
    printk("mach: add device serial (array:%d)\n", ARRAY_SIZE(uart_devices));
    platform_add_devices(uart_devices, ARRAY_SIZE(uart_devices));
#endif

#if defined(CONFIG_I2C_NXP)
    printk("mach: add device i2c bus (array:%d) \n", ARRAY_SIZE(i2c_devices));
    platform_add_devices(i2c_devices, ARRAY_SIZE(i2c_devices));
#endif
#if defined(CONFIG_RTC_DRV_NXP)
    printk("mach: add device Real Time Clock  \n");
    platform_device_register(&rtc_plat_device);
#endif

#if defined(CONFIG_HAVE_PWM)
    printk("mach: add device generic pwm (array:%d)\n", ARRAY_SIZE(pwm_devices));
    platform_add_devices(pwm_devices, ARRAY_SIZE(pwm_devices));
#endif

#if defined(CONFIG_GPIO_NXP)
    printk("mach: add device generic gpio (array:%d)\n", ARRAY_SIZE(gpio_devices));
    platform_add_devices(gpio_devices, ARRAY_SIZE(gpio_devices));
#endif

#if defined(CONFIG_SND_NXP_I2S) || defined(CONFIG_SND_NXP_I2S_MODULE)
    printk("mach: add device i2s (array:%d) \n", ARRAY_SIZE(i2s_devices));
    platform_add_devices(i2s_devices, ARRAY_SIZE(i2s_devices));
#endif

#if defined(CONFIG_SND_NXP_SPDIF_TX) || defined(CONFIG_SND_NXP_SPDIF_TX_MODULE)
    printk("mach: add device spdif tx\n");
    platform_device_register(&spdif_device_tx);
#endif

#if defined(CONFIG_SND_NXP_SPDIF_RX) || defined(CONFIG_SND_NXP_SPDIF_RX_MODULE)
    printk("mach: add device spdif rx\n");
    platform_device_register(&spdif_device_rx);
#endif

#if defined(CONFIG_SND_NXP_PDM) || defined(CONFIG_SND_NXP_PDM_MODULE)
    printk("mach: add device pdm\n");
    platform_device_register(&pdm_device);
#endif

#if defined(CONFIG_USB_EHCI_SYNOPSYS)
    printk("mach: add device usb_ehci\n");
    platform_device_register(&nxp_device_ehci);
#endif

#if defined(CONFIG_USB_OHCI_SYNOPSYS)
    printk("mach: add device usb_ohci\n");
    platform_device_register(&nxp_device_ohci);
#endif

#if defined(CONFIG_USB_DWCOTG)
    printk("mach: add device usb otg\n");
    platform_device_register(&otg_plat_device);
#endif

#if defined(CONFIG_ION_NXP)
    printk("mach: add device ion-nxp\n");
    nxp_ion_set_platdata();
    platform_device_register(&nxp_device_ion);
#endif

#if defined(CONFIG_NXP_ADC)
    printk("mach: add device adc\n");
    platform_device_register(&nxp_adc_device);
#endif
    /* Register the platform devices */
    printk("mach: add graphic device opengl|es\n");
    platform_device_register(&vr_gpu_device);

#if defined(CONFIG_NXP_WDT)
    printk("mach: add device watchdog\n");
    platform_device_register(&nxp_device_wdt);
#endif

#ifdef CONFIG_PM_RUNTIME
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
    pm_runtime_set_autosuspend_delay(&(vr_gpu_device.dev), 1000);
    pm_runtime_use_autosuspend(&(vr_gpu_device.dev));
#endif
    pm_runtime_enable(&(vr_gpu_device.dev));
#endif
}
Example #30
static int sdhci_s3c_probe(struct platform_device *pdev)
{
	struct s3c_sdhci_platdata *pdata;
	struct sdhci_s3c_drv_data *drv_data;
	struct device *dev = &pdev->dev;
	struct sdhci_host *host;
	struct sdhci_s3c *sc;
	struct resource *res;
	int ret, irq, ptr, clks;

	if (!pdev->dev.platform_data && !pdev->dev.of_node) {
		dev_err(dev, "no device data specified\n");
		return -ENOENT;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no irq specified\n");
		return irq;
	}

	host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
	if (IS_ERR(host)) {
		dev_err(dev, "sdhci_alloc_host() failed\n");
		return PTR_ERR(host);
	}
	sc = sdhci_priv(host);

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		ret = -ENOMEM;
		goto err_pdata_io_clk;
	}

	if (pdev->dev.of_node) {
		ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata);
		if (ret)
			goto err_pdata_io_clk;
	} else {
		memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
		sc->ext_cd_gpio = -1; /* invalid gpio number */
	}

	drv_data = sdhci_s3c_get_driver_data(pdev);

	sc->host = host;
	sc->pdev = pdev;
	sc->pdata = pdata;

	platform_set_drvdata(pdev, host);

	sc->clk_io = devm_clk_get(dev, "hsmmc");
	if (IS_ERR(sc->clk_io)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(sc->clk_io);
		goto err_pdata_io_clk;
	}

	/* enable the local io clock and keep it running for the moment. */
	clk_prepare_enable(sc->clk_io);

	for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
		struct clk *clk;
		char name[14];

		snprintf(name, 14, "mmc_busclk.%d", ptr);
		clk = devm_clk_get(dev, name);
		if (IS_ERR(clk))
			continue;

		clks++;
		sc->clk_bus[ptr] = clk;

		/*
		 * save current clock index to know which clock bus
		 * is used later in overriding functions.
		 */
		sc->cur_clk = ptr;

		dev_info(dev, "clock source %d: %s (%ld Hz)\n",
			 ptr, name, clk_get_rate(clk));
	}

	if (clks == 0) {
		dev_err(dev, "failed to find any bus clocks\n");
		ret = -ENOENT;
		goto err_no_busclks;
	}

#ifndef CONFIG_PM_RUNTIME
	clk_prepare_enable(sc->clk_bus[sc->cur_clk]);
#endif

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->ioaddr)) {
		ret = PTR_ERR(host->ioaddr);
		goto err_req_regs;
	}

	/* Ensure we have minimal gpio selected CMD/CLK/Detect */
	if (pdata->cfg_gpio)
		pdata->cfg_gpio(pdev, pdata->max_width);

	host->hw_name = "samsung-hsmmc";
	host->ops = &sdhci_s3c_ops;
	host->quirks = 0;
	host->irq = irq;

	/* Setup quirks for the controller */
	host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
	host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
	if (drv_data)
		host->quirks |= drv_data->sdhci_quirks;

#ifndef CONFIG_MMC_SDHCI_S3C_DMA

	/* we currently see overruns on errors, so disable the SDMA
	 * support as well. */
	host->quirks |= SDHCI_QUIRK_BROKEN_DMA;

#endif /* CONFIG_MMC_SDHCI_S3C_DMA */

	/* It seems we do not get a DATA transfer complete on non-busy
	 * transfers, not sure if this is a problem with this specific
	 * SDHCI block, or a missing configuration that needs to be set. */
	host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;

	/* This host supports the Auto CMD12 */
	host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;

	/* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
	host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;

	if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
	    pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
		host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
		host->mmc->caps = MMC_CAP_NONREMOVABLE;

	switch (pdata->max_width) {
	case 8:
		host->mmc->caps |= MMC_CAP_8_BIT_DATA;
	case 4:
		host->mmc->caps |= MMC_CAP_4_BIT_DATA;
		break;
	}

	if (pdata->pm_caps)
		host->mmc->pm_caps |= pdata->pm_caps;

	host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
			 SDHCI_QUIRK_32BIT_DMA_SIZE);

	/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
	host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;

	/*
	 * If controller does not have internal clock divider,
	 * we can use overriding functions instead of default.
	 */
	if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
		pax_open_kernel();
		*(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
		*(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
		*(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
		pax_close_kernel();
	}

	/* It supports additional host capabilities if needed */
	if (pdata->host_caps)
		host->mmc->caps |= pdata->host_caps;

	if (pdata->host_caps2)
		host->mmc->caps2 |= pdata->host_caps2;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_suspend_ignore_children(&pdev->dev, 1);

	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(dev, "sdhci_add_host() failed\n");
		pm_runtime_forbid(&pdev->dev);
		pm_runtime_get_noresume(&pdev->dev);
		goto err_req_regs;
	}

	/* The following two methods of card detection might call
	   sdhci_s3c_notify_change() immediately, so they can be called
	   only after sdhci_add_host(). Setup errors are ignored. */
	if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init)
		pdata->ext_cd_init(&sdhci_s3c_notify_change);
	if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
	    gpio_is_valid(pdata->ext_cd_gpio))
		sdhci_s3c_setup_card_detect_gpio(sc);

#ifdef CONFIG_PM_RUNTIME
	if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL)
		clk_disable_unprepare(sc->clk_io);
#endif
	return 0;

 err_req_regs:
#ifndef CONFIG_PM_RUNTIME
	clk_disable_unprepare(sc->clk_bus[sc->cur_clk]);
#endif

 err_no_busclks:
	clk_disable_unprepare(sc->clk_io);

 err_pdata_io_clk:
	sdhci_free_host(host);

	return ret;
}