Beispiel #1
0
/*
 * regmap_mmio_gen_context() - validate @config and allocate an MMIO
 * register-map context.
 *
 * @dev:    device used for the optional clock lookup
 * @clk_id: clock consumer id, or NULL when no clock is required
 * @regs:   ioremapped register base the context will operate on
 * @config: regmap configuration to validate against MMIO constraints
 *
 * Returns the newly allocated context on success, or an ERR_PTR() on
 * failure.  When @clk_id is non-NULL the clock is looked up and
 * prepared; otherwise ctx->clk is left as ERR_PTR(-ENODEV) to mark
 * "no clock in use".
 *
 * Fix: removed the stray, unreachable "break;" that sat between the
 * #endif and the default: label of the val_bits switch.
 */
static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
					const char *clk_id,
					void __iomem *regs,
					const struct regmap_config *config)
{
	struct regmap_mmio_context *ctx;
	int min_stride;
	int ret;

	ret = regmap_mmio_regbits_check(config->reg_bits);
	if (ret)
		return ERR_PTR(ret);

	/* Padding bits make no sense for memory-mapped registers. */
	if (config->pad_bits)
		return ERR_PTR(-EINVAL);

	switch (config->val_bits) {
	case 8:
		/* The core treats 0 as 1 */
		min_stride = 0;
		break;
	case 16:
		min_stride = 2;
		break;
	case 32:
		min_stride = 4;
		break;
#ifdef CONFIG_64BIT
	case 64:
		min_stride = 8;
		break;
#endif
	default:
		return ERR_PTR(-EINVAL);
	}

	if (config->reg_stride < min_stride)
		return ERR_PTR(-EINVAL);

	/* MMIO accessors only support native-endian register formatting. */
	switch (config->reg_format_endian) {
	case REGMAP_ENDIAN_DEFAULT:
	case REGMAP_ENDIAN_NATIVE:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->regs = regs;
	ctx->val_bytes = config->val_bits / 8;
	ctx->reg_bytes = config->reg_bits / 8;
	ctx->pad_bytes = config->pad_bits / 8;
	ctx->clk = ERR_PTR(-ENODEV);

	if (clk_id == NULL)
		return ctx;

	ctx->clk = clk_get(dev, clk_id);
	if (IS_ERR(ctx->clk)) {
		ret = PTR_ERR(ctx->clk);
		goto err_free;
	}

	ret = clk_prepare(ctx->clk);
	if (ret < 0) {
		clk_put(ctx->clk);
		goto err_free;
	}

	return ctx;

err_free:
	kfree(ctx);

	return ERR_PTR(ret);
}
/*
 * msm_cam_clk_enable() - acquire, rate-configure and enable a list of
 * camera clocks, or disable and release them.
 *
 * @dev:      device the clocks are looked up against
 * @clk_info: per-clock name/rate/delay table with @num_clk entries
 * @clk_ptr:  caller-provided array that receives the clk handles
 * @num_clk:  number of entries in @clk_info and @clk_ptr
 * @enable:   non-zero to get/round/prepare/enable, zero to
 *            disable/unprepare/put in reverse order
 *
 * On enable failure everything acquired so far is rolled back in
 * reverse order.  Returns 0 on success or a negative errno.
 *
 * Fix: the two clk_round_rate() failure paths used to jump to the
 * rollback labels with rc still 0, so the function rolled everything
 * back and then reported success to the caller.  The error is now
 * propagated.
 */
int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
		struct clk **clk_ptr, int num_clk, int enable)
{
	int i;
	int rc = 0;
	long clk_rate;

	if (enable) {
		for (i = 0; i < num_clk; i++) {
			CDBG("%s enable %s\n", __func__, clk_info[i].clk_name);
			clk_ptr[i] = clk_get(dev, clk_info[i].clk_name);
			if (IS_ERR(clk_ptr[i])) {
				pr_err("%s get failed\n", clk_info[i].clk_name);
				rc = PTR_ERR(clk_ptr[i]);
				goto cam_clk_get_err;
			}
			if (clk_info[i].clk_rate > 0) {
				clk_rate = clk_round_rate(clk_ptr[i],
					clk_info[i].clk_rate);
				if (clk_rate < 0) {
					pr_err("%s round failed\n",
						   clk_info[i].clk_name);
					/* BUGFIX: was left at 0, reporting
					 * success after a rollback. */
					rc = clk_rate;
					goto cam_clk_set_err;
				}
				rc = clk_set_rate(clk_ptr[i],
					clk_rate);
				if (rc < 0) {
					pr_err("%s set failed\n",
						clk_info[i].clk_name);
					goto cam_clk_set_err;
				}

			} else if (clk_info[i].clk_rate == INIT_RATE) {
				/* INIT_RATE: keep the current rate, or
				 * round from 0 if the clock reports none. */
				clk_rate = clk_get_rate(clk_ptr[i]);
				if (clk_rate == 0) {
					clk_rate =
						  clk_round_rate(clk_ptr[i], 0);
					if (clk_rate < 0) {
						pr_err("%s round rate failed\n",
							  clk_info[i].clk_name);
						/* BUGFIX: propagate error
						 * (rc used to stay 0). */
						rc = clk_rate;
						goto cam_clk_set_err;
					}
					rc = clk_set_rate(clk_ptr[i],
								clk_rate);
					if (rc < 0) {
						pr_err("%s set rate failed\n",
							  clk_info[i].clk_name);
						goto cam_clk_set_err;
					}
				}
			}
			rc = clk_prepare(clk_ptr[i]);
			if (rc < 0) {
				pr_err("%s prepare failed\n",
					   clk_info[i].clk_name);
				goto cam_clk_prepare_err;
			}

			rc = clk_enable(clk_ptr[i]);
			if (rc < 0) {
				pr_err("%s enable failed\n",
					   clk_info[i].clk_name);
				goto cam_clk_enable_err;
			}
			/* Optional post-enable settling delay, in ms. */
			if (clk_info[i].delay > 20) {
				msleep(clk_info[i].delay);
			} else if (clk_info[i].delay) {
				usleep_range(clk_info[i].delay * 1000,
					(clk_info[i].delay * 1000) + 1000);
			}
		}
	} else {
		/* Tear down in reverse acquisition order. */
		for (i = num_clk - 1; i >= 0; i--) {
			if (clk_ptr[i] != NULL) {
				CDBG("%s disable %s\n", __func__,
					clk_info[i].clk_name);
				clk_disable(clk_ptr[i]);
				clk_unprepare(clk_ptr[i]);
				clk_put(clk_ptr[i]);
			}
		}
	}
	return rc;


cam_clk_enable_err:
	clk_unprepare(clk_ptr[i]);
cam_clk_prepare_err:
cam_clk_set_err:
	clk_put(clk_ptr[i]);
cam_clk_get_err:
	/* Roll back every clock fully enabled before entry i. */
	for (i--; i >= 0; i--) {
		if (clk_ptr[i] != NULL) {
			clk_disable(clk_ptr[i]);
			clk_unprepare(clk_ptr[i]);
			clk_put(clk_ptr[i]);
		}
	}
	return rc;
}
Beispiel #3
0
/*
 * nuc970_adc_probe() - probe the NUC970 normal ADC.
 *
 * Maps the controller registers, enables the engine ("adc_eclk") and
 * peripheral ("adc") clocks, installs the interrupt handler,
 * configures the IIO device and registers it.
 *
 * Fixes over the previous version: every error path now releases what
 * was acquired before it (the register mapping, both clocks and the
 * IRQ used to leak), and the clk_get() results are checked with
 * IS_ERR() before use.
 *
 * Returns 0 on success or a negative errno.
 */
static int nuc970_adc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct nuc970_adc_device *info;
	struct resource *res;
	int irq;
	int ret;

	indio_dev = iio_device_alloc(sizeof(struct nuc970_adc_device));
	if (indio_dev == NULL) {
		dev_err(&pdev->dev, "failed to allocate iio device\n");
		return -ENOMEM;
	}

	info = iio_priv(indio_dev);

	/* map the registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "cannot find IO resource\n");
		ret = -ENOENT;
		goto err_free_dev;
	}

	info->regs = ioremap(res->start, resource_size(res));
	if (info->regs == NULL) {
		dev_err(&pdev->dev, "cannot map IO\n");
		ret = -ENXIO;
		goto err_free_dev;
	}

	indio_dev->dev.parent = &pdev->dev;
	indio_dev->name = dev_name(&pdev->dev);
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &nuc970_adc_info;
#ifdef CONFIG_BOARD_TOMATO
	indio_dev->num_channels = 4;
#else
	indio_dev->num_channels = 8;
#endif
	indio_dev->channels = nuc970_adc_iio_channels;
	/* NOTE(review): masklength = num_channels - 1 is kept as in the
	 * original; confirm against the IIO core's expectations. */
	indio_dev->masklength = indio_dev->num_channels - 1;

	/* find the clocks and enable them */
	info->eclk = clk_get(NULL, "adc_eclk");
	if (IS_ERR(info->eclk)) {
		dev_err(&pdev->dev, "cannot get adc_eclk\n");
		ret = PTR_ERR(info->eclk);
		goto err_unmap;
	}
	clk_prepare(info->eclk);
	clk_enable(info->eclk);

	info->clk = clk_get(NULL, "adc");
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "cannot get adc clock\n");
		ret = PTR_ERR(info->clk);
		goto err_eclk;
	}
	clk_prepare(info->clk);
	clk_enable(info->clk);

	clk_set_rate(info->eclk, 1000000);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		ret = irq;
		goto err_clk;
	}

	info->irq = irq;

	init_completion(&info->completion);

	ret = request_irq(info->irq, nuc970_adc_isr,
					0, dev_name(&pdev->dev), info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed requesting irq, irq = %d\n",
							info->irq);
		goto err_clk;
	}

#ifdef CONFIG_NUC970_NADC_BANDGAP
	writel(3, info->regs + CTL); /* enable AD_EN */
#endif
#ifdef CONFIG_NUC970_NADC_VREF
	writel(1, info->regs + CTL); /* enable AD_EN */
#endif
#ifdef CONFIG_NUC970_NADC_I33V
	writel(0x3<<6, info->regs + CONF); /* select AGND33 vs AVDD33 */
	writel(1, info->regs + CTL); /* enable AD_EN, disable bandgap */
#endif

	writel(1, info->regs + IER); /* enable M_IEN */

	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
			&nuc970_trigger_handler, &nuc970_ring_setup_ops);
	if (ret)
		goto err_free_channels;

	ret = iio_device_register(indio_dev);
	if (ret < 0) {
		printk("Couldn't register NC970 ADC..\n");
		goto err_free_channels;
	}

	platform_set_drvdata(pdev, indio_dev);

	writel((readl(info->regs + CONF) | 1<<2), info->regs + CONF); /* enable NACEN */

	printk("%s: nuc970 Normal ADC adapter\n",
						indio_dev->name);

	return 0;

err_free_channels:
	nuc970_adc_channels_remove(indio_dev);
	free_irq(info->irq, info);
err_clk:
	clk_disable(info->clk);
	clk_unprepare(info->clk);
	clk_put(info->clk);
err_eclk:
	clk_disable(info->eclk);
	clk_unprepare(info->eclk);
	clk_put(info->eclk);
err_unmap:
	iounmap(info->regs);
err_free_dev:
	iio_device_free(indio_dev);
	return ret;
}
/*
 * Get, optionally rate-set, prepare and enable @num_clk camera clocks
 * (enable != 0), or disable, unprepare and release them in reverse
 * order (enable == 0).  On an enable failure, everything acquired so
 * far is rolled back.  Returns 0 on success or a negative errno.
 */
int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
		struct clk **clk_ptr, int num_clk, int enable)
{
	int idx;
	int ret = 0;

	if (!enable) {
		/* Tear down in reverse acquisition order. */
		for (idx = num_clk - 1; idx >= 0; idx--) {
			if (clk_ptr[idx] == NULL)
				continue;
			clk_disable(clk_ptr[idx]);
			clk_unprepare(clk_ptr[idx]);
			clk_put(clk_ptr[idx]);
		}
		return ret;
	}

	for (idx = 0; idx < num_clk; idx++) {
		clk_ptr[idx] = clk_get(dev, clk_info[idx].clk_name);
		if (IS_ERR(clk_ptr[idx])) {
			pr_err("%s get failed\n", clk_info[idx].clk_name);
			ret = PTR_ERR(clk_ptr[idx]);
			goto err_rollback;
		}
		if (clk_info[idx].clk_rate >= 0) {
			ret = clk_set_rate(clk_ptr[idx],
						clk_info[idx].clk_rate);
			if (ret < 0) {
				pr_err("%s set failed\n",
					   clk_info[idx].clk_name);
				goto err_put;
			}
		}
		ret = clk_prepare(clk_ptr[idx]);
		if (ret < 0) {
			pr_err("%s prepare failed\n",
				   clk_info[idx].clk_name);
			goto err_put;
		}

		ret = clk_enable(clk_ptr[idx]);
		if (ret < 0) {
			pr_err("%s enable failed\n",
				   clk_info[idx].clk_name);
			goto err_unprepare;
		}
	}
	return ret;

err_unprepare:
	clk_unprepare(clk_ptr[idx]);
err_put:
	clk_put(clk_ptr[idx]);
err_rollback:
	/* Release every clock fully enabled before entry idx. */
	for (idx--; idx >= 0; idx--) {
		if (clk_ptr[idx] == NULL)
			continue;
		clk_disable(clk_ptr[idx]);
		clk_unprepare(clk_ptr[idx]);
		clk_put(clk_ptr[idx]);
	}
	return ret;
}
/*
 * kpd_pdrv_probe() - probe the MediaTek keypad controller.
 *
 * Enables the optional keypad clock, maps the controller registers
 * and resolves the IRQ from DT, allocates and registers the input
 * device, registers the misc device node, installs the keypad IRQ
 * handler and creates the sysfs attributes.
 *
 * NOTE(review): the error paths after of_iomap()/clk enable return
 * without iounmap()ing kp_base or disabling the clock, so those leak
 * on probe failure — confirm whether that is acceptable here.
 *
 * Returns 0 on success or a negative errno.
 */
static int kpd_pdrv_probe(struct platform_device *pdev)
{

	int i, r;
	int err = 0;
	struct clk *kpd_clk = NULL;

	kpd_info("Keypad probe start!!!\n");

	/*kpd-clk should be control by kpd driver, not depend on default clock state*/
	kpd_clk = devm_clk_get(&pdev->dev, "kpd-clk");
	if (!IS_ERR(kpd_clk)) {
		clk_prepare(kpd_clk);
		clk_enable(kpd_clk);
	} else {
		kpd_print("get kpd-clk fail, but not return, maybe kpd-clk is set by ccf.\n");
	}

	/* Map the keypad registers and resolve the keypad IRQ from DT. */
	kp_base = of_iomap(pdev->dev.of_node, 0);
	if (!kp_base) {
		kpd_info("KP iomap failed\n");
		return -ENODEV;
	};

	kp_irqnr = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!kp_irqnr) {
		kpd_info("KP get irqnr failed\n");
		return -ENODEV;
	}
	kpd_info("kp base: 0x%p, addr:0x%p,  kp irq: %d\n", kp_base, &kp_base, kp_irqnr);
	/* initialize and register input device (/dev/input/eventX) */
	kpd_input_dev = input_allocate_device();
	if (!kpd_input_dev) {
		kpd_print("input allocate device fail.\n");
		return -ENOMEM;
	}

	kpd_input_dev->name = KPD_NAME;
	kpd_input_dev->id.bustype = BUS_HOST;
	kpd_input_dev->id.vendor = 0x2454;
	kpd_input_dev->id.product = 0x6500;
	kpd_input_dev->id.version = 0x0010;
	kpd_input_dev->open = kpd_open;

	kpd_get_dts_info(pdev->dev.of_node);

#ifdef CONFIG_ARCH_MT8173
	wake_lock_init(&pwrkey_lock, WAKE_LOCK_SUSPEND, "PWRKEY");
#endif

	/* fulfill custom settings */
	kpd_memory_setting();

	__set_bit(EV_KEY, kpd_input_dev->evbit);

#if defined(CONFIG_KPD_PWRKEY_USE_EINT) || defined(CONFIG_KPD_PWRKEY_USE_PMIC)
	__set_bit(kpd_dts_data.kpd_sw_pwrkey, kpd_input_dev->keybit);
	kpd_keymap[8] = 0;
#endif
	if (!kpd_dts_data.kpd_use_extend_type) {
		for (i = 17; i < KPD_NUM_KEYS; i += 9)	/* only [8] works for Power key */
			kpd_keymap[i] = 0;
	}
	/* Advertise every mapped key code to the input core. */
	for (i = 0; i < KPD_NUM_KEYS; i++) {
		if (kpd_keymap[i] != 0)
			__set_bit(kpd_keymap[i], kpd_input_dev->keybit);
	}

#if KPD_AUTOTEST
	for (i = 0; i < ARRAY_SIZE(kpd_auto_keymap); i++)
		__set_bit(kpd_auto_keymap[i], kpd_input_dev->keybit);
#endif

#if KPD_HAS_SLIDE_QWERTY
	__set_bit(EV_SW, kpd_input_dev->evbit);
	__set_bit(SW_LID, kpd_input_dev->swbit);
#endif
	if (kpd_dts_data.kpd_sw_rstkey)
		__set_bit(kpd_dts_data.kpd_sw_rstkey, kpd_input_dev->keybit);
#ifdef KPD_KEY_MAP
	__set_bit(KPD_KEY_MAP, kpd_input_dev->keybit);
#endif
#ifdef CONFIG_MTK_MRDUMP_KEY
		__set_bit(KEY_RESTART, kpd_input_dev->keybit);
#endif
	kpd_input_dev->dev.parent = &pdev->dev;
	r = input_register_device(kpd_input_dev);
	if (r) {
		kpd_info("register input device failed (%d)\n", r);
		input_free_device(kpd_input_dev);
		return r;
	}

	/* register device (/dev/mt6575-kpd) */
	kpd_dev.parent = &pdev->dev;
	r = misc_register(&kpd_dev);
	if (r) {
		kpd_info("register device failed (%d)\n", r);
		input_unregister_device(kpd_input_dev);
		return r;
	}

	/* Keep the system awake while key events are being handled. */
	wake_lock_init(&kpd_suspend_lock, WAKE_LOCK_SUSPEND, "kpd wakelock");

	/* register IRQ and EINT */
	kpd_set_debounce(kpd_dts_data.kpd_key_debounce);
	r = request_irq(kp_irqnr, kpd_irq_handler, IRQF_TRIGGER_NONE, KPD_NAME, NULL);
	if (r) {
		kpd_info("register IRQ failed (%d)\n", r);
		misc_deregister(&kpd_dev);
		input_unregister_device(kpd_input_dev);
		return r;
	}
	mt_eint_register();

#ifndef KPD_EARLY_PORTING	/*add for avoid early porting build err the macro is defined in custom file */
	long_press_reboot_function_setting();	/* /API 4 for kpd long press reboot function setting */
#endif
	/* Timers used by AEE (exception report) key-combo detection. */
	hrtimer_init(&aee_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	aee_timer.function = aee_timer_func;

#if AEE_ENABLE_5_15
	hrtimer_init(&aee_timer_5s, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	aee_timer_5s.function = aee_timer_5s_func;
#endif
	err = kpd_create_attr(&kpd_pdrv.driver);
	if (err) {
		kpd_info("create attr file fail\n");
		kpd_delete_attr(&kpd_pdrv.driver);
		return err;
	}
	kpd_info("%s Done\n", __func__);
	return 0;
}
Beispiel #6
0
/*
 * efm32_adc_read_single() - perform one single-shot ADC conversion.
 *
 * Prepares the ADC clock, claims the device (ddata->busy), programs a
 * single conversion, waits up to two seconds for the completion raised
 * by the IRQ handler, then reads the sample into *val.
 *
 * Returns 0 on success, -EBUSY if a conversion is already in flight,
 * -ETIMEDOUT if no valid sample appeared, or another negative errno.
 *
 * NOTE(review): clk_enable() and request_irq()/free_irq() are called
 * while holding ddata->lock via spin_lock_irq(); if any of those can
 * sleep on this platform, that is a sleeping-in-atomic bug — confirm.
 */
static int efm32_adc_read_single(struct device *dev,
		struct device_attribute *devattr, unsigned int *val)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct efm32_adc_ddata *ddata = platform_get_drvdata(pdev);
	int ret;
	struct efm32_adc_irqdata irqdata = { .ddata = ddata, };
	u32 status;
	unsigned long freq;

	ret = clk_prepare(ddata->clk);
	if (ret < 0) {
		dev_dbg(ddata->hwmondev, "failed to enable clk (%d)\n", ret);
		return ret;
	}

	spin_lock_irq(&ddata->lock);
	/* Only one conversion may be in flight at a time. */
	if (ddata->busy) {
		dev_dbg(ddata->hwmondev, "busy\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	init_completion(&irqdata.done);

	ret = clk_enable(ddata->clk);
	if (ret < 0) {
		dev_dbg(ddata->hwmondev, "failed to enable clk (%d)\n", ret);
		goto out_unlock;
	}
	freq = clk_get_rate(ddata->clk);

	/* Stop any stale conversion, then derive ADC timing from the
	 * current clock rate and kick off a single conversion. */
	efm32_adc_write32(ddata,
			ADC_CMD_SINGLESTOP | ADC_CMD_SCANSTOP, ADC_CMD);
	efm32_adc_write32(ddata, ((freq - 1) / 1000000) << 16 |
			((freq / 400000) - 1) << 8, ADC_CTRL);
	efm32_adc_write32(ddata, 0x800, ADC_SINGLECTRL);
	efm32_adc_write32(ddata, ADC_IF_SINGLE, ADC_IFC);
	efm32_adc_write32(ddata, ADC_CMD_SINGLESTART, ADC_CMD);

	ret = request_irq(ddata->irq, efm32_adc_irq, 0, DRIVER_NAME, &irqdata);
	if (ret) {
		efm32_adc_write32(ddata, ADC_CMD_SINGLESTOP, ADC_CMD);
		goto out_clkoff;
	}

	/* Unmask the single-conversion interrupt. */
	efm32_adc_write32(ddata, ADC_IF_SINGLE, ADC_IEN);

	ddata->busy = 1;

	spin_unlock_irq(&ddata->lock);

	/* ret == 0 here means timeout; the SINGLEDV status check below
	 * decides whether a sample still made it. */
	ret = wait_for_completion_interruptible_timeout(&irqdata.done, 2 * HZ);

	spin_lock_irq(&ddata->lock);

	efm32_adc_write32(ddata, 0, ADC_IEN);
	free_irq(ddata->irq, &irqdata);

	if (ret < 0)
		goto done_out_unlock;

	status = efm32_adc_read32(ddata, ADC_STATUS);
	if (status & ADC_STATUS_SINGLEDV) {
		*val = efm32_adc_read32(ddata, ADC_SINGLEDATA);
		ret = 0;
	} else
		ret = -ETIMEDOUT;

done_out_unlock:
	ddata->busy = 0;
out_clkoff:
	clk_disable(ddata->clk);
out_unlock:
	spin_unlock_irq(&ddata->lock);

	clk_unprepare(ddata->clk);

	return ret;
}
Beispiel #7
0
/*
 * mxs_saif_hw_params() - configure the SAIF for the format and sample
 * rate described by @params.
 *
 * Should only be called when port is inactive,
 * although can be called multiple times by upper layers.
 */
static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params,
			     struct snd_soc_dai *cpu_dai)
{
	struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
	struct mxs_saif *master_saif;
	u32 scr, stat;
	int ret;

	master_saif = mxs_saif_get_master(saif);
	if (!master_saif)
		return -EINVAL;

	/* mclk should already be set */
	if (!saif->mclk && saif->mclk_in_use) {
		dev_err(cpu_dai->dev, "set mclk first\n");
		return -EINVAL;
	}

	/* Reconfiguring a busy, self-clocked SAIF is not allowed. */
	stat = __raw_readl(saif->base + SAIF_STAT);
	if (!saif->mclk_in_use && (stat & BM_SAIF_STAT_BUSY)) {
		dev_err(cpu_dai->dev, "error: busy\n");
		return -EBUSY;
	}

	/*
	 * Set saif clk based on sample rate.
	 * If mclk is used, we also set mclk, if not, saif->mclk is
	 * default 0, means not used.
	 */
	ret = mxs_saif_set_clk(saif, saif->mclk, params_rate(params));
	if (ret) {
		dev_err(cpu_dai->dev, "unable to get proper clk\n");
		return ret;
	}

	if (saif != master_saif) {
		/*
		* Set an initial clock rate for the saif internal logic to work
		* properly. This is important when working in EXTMASTER mode
		* that uses the other saif's BITCLK&LRCLK but it still needs a
		* basic clock which should be fast enough for the internal
		* logic.
		*/
		/*
		 * NOTE(review): clk_enable() without a preceding
		 * clk_prepare() violates the common-clk API contract, and
		 * master_saif->clk is prepared here with no matching
		 * clk_unprepare() in this function — confirm both are
		 * balanced elsewhere in the driver.
		 */
		clk_enable(saif->clk);
		ret = clk_set_rate(saif->clk, 24000000);
		clk_disable(saif->clk);
		if (ret)
			return ret;

		ret = clk_prepare(master_saif->clk);
		if (ret)
			return ret;
	}

	scr = __raw_readl(saif->base + SAIF_CTRL);

	/* Select word length; >16-bit formats also need the 48xfs bit clock. */
	scr &= ~BM_SAIF_CTRL_WORD_LENGTH;
	scr &= ~BM_SAIF_CTRL_BITCLK_48XFS_ENABLE;
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		scr |= BF_SAIF_CTRL_WORD_LENGTH(0);
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		scr |= BF_SAIF_CTRL_WORD_LENGTH(4);
		scr |= BM_SAIF_CTRL_BITCLK_48XFS_ENABLE;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		scr |= BF_SAIF_CTRL_WORD_LENGTH(8);
		scr |= BM_SAIF_CTRL_BITCLK_48XFS_ENABLE;
		break;
	default:
		return -EINVAL;
	}

	/* Tx/Rx config */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* enable TX mode */
		scr &= ~BM_SAIF_CTRL_READ_MODE;
	} else {
		/* enable RX mode */
		scr |= BM_SAIF_CTRL_READ_MODE;
	}

	__raw_writel(scr, saif->base + SAIF_CTRL);
	return 0;
}
Beispiel #8
0
/*
 * Probe the ST SPEAr matrix keyboard controller: collect platform
 * resources, build the input device and keymap, hook up the IRQ and
 * register the device.  Everything except the prepared clock is
 * devm-managed, so only clk_unprepare() needs explicit unwinding.
 */
static int spear_kbd_probe(struct platform_device *pdev)
{
	struct kbd_platform_data *pdata = dev_get_platdata(&pdev->dev);
	const struct matrix_keymap_data *keymap = pdata ? pdata->keymap : NULL;
	struct spear_kbd *kbd;
	struct input_dev *idev;
	struct resource *mem;
	int irq;
	int ret;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no keyboard resource defined\n");
		return -EBUSY;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "not able to get irq for the device\n");
		return irq;
	}

	kbd = devm_kzalloc(&pdev->dev, sizeof(*kbd), GFP_KERNEL);
	if (!kbd) {
		dev_err(&pdev->dev, "not enough memory for driver data\n");
		return -ENOMEM;
	}

	idev = devm_input_allocate_device(&pdev->dev);
	if (!idev) {
		dev_err(&pdev->dev, "unable to allocate input device\n");
		return -ENOMEM;
	}

	kbd->input = idev;
	kbd->irq = irq;

	/* Configuration comes from platform data when present, DT otherwise. */
	if (pdata) {
		kbd->mode = pdata->mode;
		kbd->rep = pdata->rep;
		kbd->suspended_rate = pdata->suspended_rate;
	} else {
		ret = spear_kbd_parse_dt(pdev, kbd);
		if (ret)
			return ret;
	}

	kbd->io_base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(kbd->io_base))
		return PTR_ERR(kbd->io_base);

	kbd->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(kbd->clk))
		return PTR_ERR(kbd->clk);

	idev->name = "Spear Keyboard";
	idev->phys = "keyboard/input0";
	idev->id.bustype = BUS_HOST;
	idev->id.vendor = 0x0001;
	idev->id.product = 0x0001;
	idev->id.version = 0x0100;
	idev->open = spear_kbd_open;
	idev->close = spear_kbd_close;

	ret = matrix_keypad_build_keymap(keymap, NULL, NUM_ROWS, NUM_COLS,
					 kbd->keycodes, idev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to build keymap\n");
		return ret;
	}

	if (kbd->rep)
		__set_bit(EV_REP, idev->evbit);
	input_set_capability(idev, EV_MSC, MSC_SCAN);

	input_set_drvdata(idev, kbd);

	ret = devm_request_irq(&pdev->dev, irq, spear_kbd_interrupt, 0,
			"keyboard", kbd);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		return ret;
	}

	ret = clk_prepare(kbd->clk);
	if (ret)
		return ret;

	ret = input_register_device(idev);
	if (ret) {
		dev_err(&pdev->dev, "Unable to register keyboard device\n");
		clk_unprepare(kbd->clk);
		return ret;
	}

	device_init_wakeup(&pdev->dev, 1);
	platform_set_drvdata(pdev, kbd);

	return 0;
}
Beispiel #9
0
//Camera key bring up -E
/*
 * kpd_pdrv_probe() - probe the MediaTek keypad (vendor variant with
 * hall-sensor switch, volume-down pinctrl and camera-key EINT setup).
 *
 * Enables the optional keypad clock, maps registers and resolves the
 * IRQ from DT, sets up pinctrl for the volume/camera keys, registers
 * the hall-sensor switch device and work queue, builds and registers
 * the input device, registers the misc node, installs the keypad IRQ
 * and EINT handlers and creates the sysfs attributes.
 *
 * NOTE(review): later error paths return without iounmap()ing
 * kp_base, disabling the clock or unregistering the switch device —
 * confirm these leaks are acceptable on probe failure.
 */
static int kpd_pdrv_probe(struct platform_device *pdev)
{

	int i, r;
	int err = 0;
	struct clk *kpd_clk = NULL;
	//Keypad porting - S
  #if 1
	struct pinctrl *pinctrl1;
	struct pinctrl_state *pins_default, *pins_eint_int;
  #endif
        //Keypad porting - E
	kpd_info("Keypad probe start!!!\n");

	/*kpd-clk should be control by kpd driver, not depend on default clock state*/
	kpd_clk = devm_clk_get(&pdev->dev, "kpd-clk");
	if (!IS_ERR(kpd_clk)) {
		clk_prepare(kpd_clk);
		clk_enable(kpd_clk);
	} else {
		kpd_print("get kpd-clk fail, but not return, maybe kpd-clk is set by ccf.\n");
	}

	/* Map keypad registers and resolve the keypad IRQ from DT. */
	kp_base = of_iomap(pdev->dev.of_node, 0);
	if (!kp_base) {
		kpd_info("KP iomap failed\n");
		return -ENODEV;
	};

	kp_irqnr = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!kp_irqnr) {
		kpd_info("KP get irqnr failed\n");
		return -ENODEV;
	}
	kpd_info("kp base: 0x%p, addr:0x%p,  kp irq: %d\n", kp_base, &kp_base, kp_irqnr);
	/* initialize and register input device (/dev/input/eventX) */
	kpd_input_dev = input_allocate_device();
	if (!kpd_input_dev) {
		kpd_print("input allocate device fail.\n");
		return -ENOMEM;
	}

	kpd_input_dev->name = KPD_NAME;
	kpd_input_dev->id.bustype = BUS_HOST;
	kpd_input_dev->id.vendor = 0x2454;
	kpd_input_dev->id.product = 0x6500;
	kpd_input_dev->id.version = 0x0010;
	kpd_input_dev->open = kpd_open;

	kpd_get_dts_info(pdev->dev.of_node);

#ifdef CONFIG_ARCH_MT8173
	wake_lock_init(&pwrkey_lock, WAKE_LOCK_SUSPEND, "PWRKEY");
#endif

	/* fulfill custom settings */
	kpd_memory_setting();

	__set_bit(EV_KEY, kpd_input_dev->evbit);
//keypad bring up - S

/* Volume-down key pinctrl: default state is looked up but only the
 * EINT state is applied below. */
#if 1  //for volume down key
  pinctrl1 = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(pinctrl1)) {
		err = PTR_ERR(pinctrl1);
		dev_err(&pdev->dev, "fwq Cannot find voldown pinctrl1!\n");
		return err;
	}

	pins_default = pinctrl_lookup_state(pinctrl1, "default");
	if (IS_ERR(pins_default)) {
		err = PTR_ERR(pins_default);
		dev_err(&pdev->dev, "fwq Cannot find voldown pinctrl default!\n");
	}

	pins_eint_int = pinctrl_lookup_state(pinctrl1, "kpd_pins_eint");
	if (IS_ERR(pins_eint_int)) {
		err = PTR_ERR(pins_eint_int);
		dev_err(&pdev->dev, "fwq Cannot find voldown pinctrl state_eint_int!\n");
		return err;
	}
#endif
	#if 0
	gpio_request(KPD_VOLUP , "KPD_KCOL1");
	gpio_direction_input(KPD_VOLUP);
	gpio_free(KPD_VOLUP);
	#endif
	pinctrl_select_state(pinctrl1, pins_eint_int);
//keypad bring up - E

	/* Hall-sensor: EINT setup, procfs entry, switch device and work. */
	err = hall_gpio_eint_setup(pdev);
	if (err!=0) {
		kpd_print("[Keypad] %s , hall_gpio_eint_setup failed (%d)\n", __FUNCTION__ , err );
	}

	proc_create_data("hall_out_status", 0444, NULL, &hall_out_status_fops, NULL);
	sdev.name = "hall_gpio";
	sdev.index = 0;
	sdev.state = 1;
	r = switch_dev_register(&sdev);
	if (r) {
		kpd_info("[Keypad] %s , register switch device failed (%d)\n", __FUNCTION__ , r);
		switch_dev_unregister(&sdev);
		return r;
	}
	/**/
	switch_set_state((struct switch_dev *)&sdev, 1);	// state initialization
	/**/
	/**/
	mutex_init(&hall_state_mutex);
	INIT_DELAYED_WORK(&hall_work, hall_work_func);
	/**/
	/**/

#if defined(CONFIG_KPD_PWRKEY_USE_EINT) || defined(CONFIG_KPD_PWRKEY_USE_PMIC)
	__set_bit(kpd_dts_data.kpd_sw_pwrkey, kpd_input_dev->keybit);
	kpd_keymap[8] = 0;
#endif
	if (!kpd_dts_data.kpd_use_extend_type) {
		for (i = 17; i < KPD_NUM_KEYS; i += 9)	/* only [8] works for Power key */
			kpd_keymap[i] = 0;
	}
	/* Advertise every mapped key code to the input core. */
	for (i = 0; i < KPD_NUM_KEYS; i++) {
		if (kpd_keymap[i] != 0)
			__set_bit(kpd_keymap[i], kpd_input_dev->keybit);
	}

#if KPD_AUTOTEST
	for (i = 0; i < ARRAY_SIZE(kpd_auto_keymap); i++)
		__set_bit(kpd_auto_keymap[i], kpd_input_dev->keybit);
#endif

#if KPD_HAS_SLIDE_QWERTY
	__set_bit(EV_SW, kpd_input_dev->evbit);
	__set_bit(SW_LID, kpd_input_dev->swbit);
#endif
	if (kpd_dts_data.kpd_sw_rstkey)
		__set_bit(kpd_dts_data.kpd_sw_rstkey, kpd_input_dev->keybit);
#ifdef KPD_KEY_MAP
	__set_bit(KPD_KEY_MAP, kpd_input_dev->keybit);
#endif
#ifdef CONFIG_MTK_MRDUMP_KEY
		__set_bit(KEY_RESTART, kpd_input_dev->keybit);
#endif
//Caerma key porting
#if 1
	for (i = 0; i < KPD_CAMERA_NUM; i++) {
		if (kpd_camerakeymap[i] != 0)
	__set_bit(kpd_camerakeymap[i], kpd_input_dev->keybit);
		kpd_info("[Keypad] set kpd_camerakeymap[%d]" , i);
		}
#endif
	kpd_input_dev->dev.parent = &pdev->dev;
	r = input_register_device(kpd_input_dev);
	if (r) {
		kpd_info("register input device failed (%d)\n", r);
		input_free_device(kpd_input_dev);
		return r;
	}

	/* register device (/dev/mt6575-kpd) */
	kpd_dev.parent = &pdev->dev;
	r = misc_register(&kpd_dev);
	if (r) {
		kpd_info("register device failed (%d)\n", r);
		input_unregister_device(kpd_input_dev);
		return r;
	}

	/* Keep the system awake while key events are being handled. */
	wake_lock_init(&kpd_suspend_lock, WAKE_LOCK_SUSPEND, "kpd wakelock");

	/* register IRQ and EINT */
	kpd_set_debounce(kpd_dts_data.kpd_key_debounce);
	r = request_irq(kp_irqnr, kpd_irq_handler, IRQF_TRIGGER_NONE, KPD_NAME, NULL);
	if (r) {
		kpd_info("register IRQ failed (%d)\n", r);
		misc_deregister(&kpd_dev);
		input_unregister_device(kpd_input_dev);
		return r;
	}
	mt_eint_register();

        //Camera key bring up -S
        printk("camera_key_setup_eint() START!!\n");
	kpd_camerakey_setup_eint();
	printk("camera_key_setup_eint() Done!!\n");
	//Camera key bring up -E

#ifndef KPD_EARLY_PORTING	/*add for avoid early porting build err the macro is defined in custom file */
	long_press_reboot_function_setting();	/* /API 4 for kpd long press reboot function setting */
#endif
	/* Timers used by AEE (exception report) key-combo detection. */
	hrtimer_init(&aee_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	aee_timer.function = aee_timer_func;

#if AEE_ENABLE_5_15
	hrtimer_init(&aee_timer_5s, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	aee_timer_5s.function = aee_timer_5s_func;
#endif

#ifdef PWK_DUMP
       hrtimer_init(&aee_timer_powerkey_30s, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
       aee_timer_powerkey_30s.function = aee_timer_30s_func;
#endif

	err = kpd_create_attr(&kpd_pdrv.driver);
	if (err) {
		kpd_info("create attr file fail\n");
		kpd_delete_attr(&kpd_pdrv.driver);
		return err;
	}
	kpd_info("%s Done\n", __func__);
	return 0;
}
Beispiel #10
0
/*
 * dw_spi_mmio_probe() - probe a memory-mapped DesignWare SPI controller.
 *
 * Fixes over the previous version:
 *  - the CONFIG_OF property-lookup failures jumped past the clock
 *    teardown, leaking a prepared and enabled clock;
 *  - the old err_irq label called free_irq() although no IRQ is ever
 *    requested in this function (bogus free of an unrequested IRQ);
 *  - the clock is now clk_unprepare()d as well as disabled on failure,
 *    matching the clk_prepare()/clk_enable() pair on the success path.
 *
 * Returns 0 on success or a negative errno.
 */
static int dw_spi_mmio_probe(struct platform_device *pdev)
{
	struct dw_spi_mmio *dwsmmio;
	struct dw_spi *dws;
	struct resource *mem, *ioarea;
	int ret;
#ifdef CONFIG_OF
	unsigned int prop;
#endif
	dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL);
	if (!dwsmmio) {
		ret = -ENOMEM;
		goto err_end;
	}

	dws = &dwsmmio->dws;

	/* Get basic io resource and map it */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		ret = -EINVAL;
		goto err_kfree;
	}

	ioarea = request_mem_region(mem->start, resource_size(mem),
			pdev->name);
	if (!ioarea) {
		dev_err(&pdev->dev, "SPI region already claimed\n");
		ret = -EBUSY;
		goto err_kfree;
	}

	dws->regs = ioremap_nocache(mem->start, resource_size(mem));
	dws->paddr = mem->start;
	if (!dws->regs) {
		dev_err(&pdev->dev, "SPI region already mapped\n");
		ret = -ENOMEM;
		goto err_release_reg;
	}

	dws->irq = platform_get_irq(pdev, 0);
	if (dws->irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		ret = dws->irq; /* -ENXIO */
		goto err_unmap;
	}

	dwsmmio->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(dwsmmio->clk)) {
		ret = PTR_ERR(dwsmmio->clk);
		goto err_unmap;
	}
	clk_prepare(dwsmmio->clk);
	clk_enable(dwsmmio->clk);

#ifdef CONFIG_OF
	if (of_property_read_u32(pdev->dev.of_node, "num-chipselect", &prop)) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "couldn't determine num-chipselect\n");
		goto err_clk;
	}
	dws->num_cs = prop;

	if (of_property_read_u32(pdev->dev.of_node, "bus-num", &prop)) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "couldn't determine bus-num\n");
		goto err_clk;
	}
	dws->bus_num = prop;
#else
	dws->num_cs = 4;
	dws->bus_num = 0;
#endif

	dws->parent_dev = &pdev->dev;
	dws->max_freq = clk_get_rate(dwsmmio->clk);

#ifdef CONFIG_SPI_DW_PL330_DMA
	ret = dw_spi_pl330_init(dws);
	if (ret)
		goto err_clk;
#endif

	ret = dw_spi_add_host(dws);
	if (ret)
		goto err_clk;

	platform_set_drvdata(pdev, dwsmmio);
	return 0;

err_clk:
	clk_disable(dwsmmio->clk);
	clk_unprepare(dwsmmio->clk);
	clk_put(dwsmmio->clk);
	dwsmmio->clk = NULL;
err_unmap:
	iounmap(dws->regs);
err_release_reg:
	release_mem_region(mem->start, resource_size(mem));
err_kfree:
	kfree(dwsmmio);
err_end:
	return ret;
}
Beispiel #11
0
/*
 * omap_usb2_probe() - probe the OMAP USB2 PHY.
 *
 * Allocates the PHY and OTG structures, acquires the control device,
 * powers the PHY down initially, prepares the mandatory 32k wakeup
 * clock and the optional 960 MHz reference clock, then registers the
 * PHY with the USB core.
 *
 * Fixes over the previous version: the return values of clk_prepare()
 * and usb_add_phy_dev() are now checked, and prepared clocks are
 * unprepared again on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int omap_usb2_probe(struct platform_device *pdev)
{
	struct omap_usb			*phy;
	struct usb_otg			*otg;
	int				ret;

	phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
	if (!phy) {
		dev_err(&pdev->dev, "unable to allocate memory for USB2 PHY\n");
		return -ENOMEM;
	}

	otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
	if (!otg) {
		dev_err(&pdev->dev, "unable to allocate memory for USB OTG\n");
		return -ENOMEM;
	}

	phy->dev		= &pdev->dev;

	phy->phy.dev		= phy->dev;
	phy->phy.label		= "omap-usb2";
	phy->phy.set_suspend	= omap_usb2_suspend;
	phy->phy.otg		= otg;
	phy->phy.type		= USB_PHY_TYPE_USB2;

	phy->control_dev = omap_get_control_dev();
	if (IS_ERR(phy->control_dev)) {
		dev_dbg(&pdev->dev, "Failed to get control device\n");
		return -ENODEV;
	}

	/* Start powered down; the core powers the PHY up on demand. */
	phy->is_suspended	= 1;
	omap_control_usb_phy_power(phy->control_dev, 0);

	otg->set_host		= omap_usb_set_host;
	otg->set_peripheral	= omap_usb_set_peripheral;
	otg->set_vbus		= omap_usb_set_vbus;
	otg->start_srp		= omap_usb_start_srp;
	otg->phy		= &phy->phy;

	phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
	if (IS_ERR(phy->wkupclk)) {
		dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
		return PTR_ERR(phy->wkupclk);
	}
	ret = clk_prepare(phy->wkupclk);
	if (ret)
		return ret;

	/* The 960 MHz reference clock is optional. */
	phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m");
	if (IS_ERR(phy->optclk)) {
		dev_vdbg(&pdev->dev, "unable to get refclk960m\n");
	} else {
		ret = clk_prepare(phy->optclk);
		if (ret)
			goto err_unprep_wkupclk;
	}

	ret = usb_add_phy_dev(&phy->phy);
	if (ret)
		goto err_unprep_optclk;

	platform_set_drvdata(pdev, phy);

	pm_runtime_enable(phy->dev);

	return 0;

err_unprep_optclk:
	if (!IS_ERR(phy->optclk))
		clk_unprepare(phy->optclk);
err_unprep_wkupclk:
	clk_unprepare(phy->wkupclk);
	return ret;
}
/*
 * samsung_usb2phy_probe() - probe the Samsung USB 2.0 PHY.
 *
 * Maps the PHY registers, looks up the PHY clock ("usbhost" on
 * EXYNOS5/5250, "otg" otherwise), gathers DT or platform data,
 * populates the usb_phy/usb_otg structures, prepares the clock and
 * registers the PHY with the USB core.
 *
 * Returns 0 on success or a negative errno.
 */
static int samsung_usb2phy_probe(struct platform_device *pdev)
{
	struct samsung_usbphy *sphy;
	struct usb_otg *otg;
	struct samsung_usbphy_data *pdata = pdev->dev.platform_data;
	const struct samsung_usbphy_drvdata *drv_data;
	struct device *dev = &pdev->dev;
	struct resource *phy_mem;
	void __iomem	*phy_base;
	struct clk *clk;
	int ret;

	phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	phy_base = devm_ioremap_resource(dev, phy_mem);
	if (IS_ERR(phy_base))
		return PTR_ERR(phy_base);

	sphy = devm_kzalloc(dev, sizeof(*sphy), GFP_KERNEL);
	if (!sphy)
		return -ENOMEM;

	otg = devm_kzalloc(dev, sizeof(*otg), GFP_KERNEL);
	if (!otg)
		return -ENOMEM;

	drv_data = samsung_usbphy_get_driver_data(pdev);

	/* The clock name depends on the SoC generation. */
	if (drv_data->cpu_type == TYPE_EXYNOS5250 ||
		drv_data->cpu_type == TYPE_EXYNOS5)
		clk = devm_clk_get(dev, "usbhost");
	else
		clk = devm_clk_get(dev, "otg");

	if (IS_ERR(clk)) {
		dev_err(dev, "Failed to get otg clock\n");
		return PTR_ERR(clk);
	}

	sphy->dev = dev;

	/* DT boot takes its config from the node; legacy boot from pdata. */
	if (dev->of_node) {
		ret = samsung_usbphy_parse_dt(sphy);
		if (ret < 0)
			return ret;
	} else {
		if (!pdata) {
			dev_err(dev, "no platform data specified\n");
			return -EINVAL;
		}
	}

	sphy->plat		= pdata;
	sphy->regs		= phy_base;
	sphy->clk		= clk;
	sphy->drv_data		= drv_data;
	sphy->phy.dev		= sphy->dev;
	sphy->phy.label		= "samsung-usb2phy";
	sphy->phy.type		= USB_PHY_TYPE_USB2;
	sphy->phy.init		= samsung_usb2phy_init;
	sphy->phy.shutdown	= samsung_usb2phy_shutdown;
	sphy->phy.is_active	= samsung_usb2phy_is_active;
	sphy->ref_clk_freq	= samsung_usbphy_get_refclk_freq(sphy);

	sphy->phy.otg		= otg;
	sphy->phy.otg->phy	= &sphy->phy;
	sphy->phy.otg->set_host = samsung_usbphy_set_host;

	/*
	 * NOTE(review): the casts below write through the const
	 * drv_data pointer, mutating the shared driver-data table; and
	 * sphy->dev->of_node may be NULL in the non-DT path accepted
	 * above — confirm both are intended.
	 */
	if (of_property_read_u32(sphy->dev->of_node,
		"samsung,hsicphy_en_mask", (u32 *)&drv_data->hsicphy_en_mask))
		dev_dbg(dev, "Failed to get hsicphy_en_mask\n");
	else if (of_property_read_u32(sphy->dev->of_node,
		"samsung,hsicphy_reg_offset",
		(u32 *)&drv_data->hsicphy_reg_offset))
		dev_dbg(dev, "Failed to get hsicphy_en_mask\n");
	else
		sphy->has_hsic_pmureg = true;

	spin_lock_init(&sphy->lock);

	ret = clk_prepare(sphy->clk);
	if (ret) {
		dev_err(dev, "clk_prepare failed\n");
		return ret;
	}

	platform_set_drvdata(pdev, sphy);

	ret = usb_add_phy_dev(&sphy->phy);
	if (ret) {
		dev_err(dev, "Failed to add PHY\n");
		goto err1;
	}

	return 0;

err1:
	clk_unprepare(sphy->clk);

	return ret;
}
Beispiel #13
0
/*
 * Initialize one TMU channel: fetch platform data, map the channel
 * registers, pre-fill the IRQ action for a later setup_irq(), prepare
 * the "tmu_fck" clock and register the channel as a clock event /
 * clock source provider.  Returns 0 on success or a negative errno.
 */
static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int irq;
	int ret = -ENXIO;

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto out;
	}

	platform_set_drvdata(pdev, p);

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto out;
	}

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto out;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (!p->mapbase) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto out;
	}

	/* setup data for setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_tmu_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.irq = irq;
	p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "tmu_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto out_unmap;
	}

	ret = clk_prepare(p->clk);
	if (ret < 0)
		goto out_put_clk;

	p->cs_enabled = false;
	p->enable_count = 0;

	ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret < 0)
		goto out_unprepare;

	return 0;

out_unprepare:
	clk_unprepare(p->clk);
out_put_clk:
	clk_put(p->clk);
out_unmap:
	iounmap(p->mapbase);
out:
	return ret;
}
Beispiel #14
0
/*
 * g2d_probe - probe the Exynos G2D 2D graphics accelerator.
 *
 * Acquires register memory, the "sclk_fimg2d" source clock and the
 * "fimg2d" gate clock, installs the IRQ handler and registers the
 * V4L2 mem2mem video device.  On any failure all resources acquired
 * so far are released in reverse order through the error labels.
 *
 * Returns 0 on success or a negative errno.
 */
static int g2d_probe(struct platform_device *pdev)
{
    struct g2d_dev *dev;
    struct video_device *vfd;
    struct resource *res;
    int ret = 0;

    dev = kzalloc(sizeof(*dev), GFP_KERNEL);
    if (!dev)
        return -ENOMEM;
    spin_lock_init(&dev->ctrl_lock);
    mutex_init(&dev->mutex);
    atomic_set(&dev->num_inst, 0);
    init_waitqueue_head(&dev->irq_queue);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
        dev_err(&pdev->dev, "failed to find registers\n");
        ret = -ENOENT;
        goto free_dev;
    }

    dev->res_regs = request_mem_region(res->start, resource_size(res),
                                       dev_name(&pdev->dev));

    if (!dev->res_regs) {
        dev_err(&pdev->dev, "failed to obtain register region\n");
        ret = -ENOENT;
        goto free_dev;
    }

    dev->regs = ioremap(res->start, resource_size(res));
    if (!dev->regs) {
        dev_err(&pdev->dev, "failed to map registers\n");
        ret = -ENOENT;
        goto rel_res_regs;
    }

    dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
    if (IS_ERR_OR_NULL(dev->clk)) {
        dev_err(&pdev->dev, "failed to get g2d clock\n");
        ret = -ENXIO;
        goto unmap_regs;
    }

    ret = clk_prepare(dev->clk);
    if (ret) {
        dev_err(&pdev->dev, "failed to prepare g2d clock\n");
        goto put_clk;
    }

    dev->gate = clk_get(&pdev->dev, "fimg2d");
    if (IS_ERR_OR_NULL(dev->gate)) {
        dev_err(&pdev->dev, "failed to get g2d clock gate\n");
        ret = -ENXIO;
        goto unprep_clk;
    }

    ret = clk_prepare(dev->gate);
    if (ret) {
        dev_err(&pdev->dev, "failed to prepare g2d clock gate\n");
        goto put_clk_gate;
    }

    res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (!res) {
        dev_err(&pdev->dev, "failed to find IRQ\n");
        ret = -ENXIO;
        goto unprep_clk_gate;
    }

    dev->irq = res->start;

    ret = request_irq(dev->irq, g2d_isr, 0, pdev->name, dev);
    if (ret) {
        dev_err(&pdev->dev, "failed to install IRQ\n");
        /* FIX: the gate clock is already prepared at this point; the
         * original jumped to put_clk_gate and skipped clk_unprepare(). */
        goto unprep_clk_gate;
    }

    dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
    if (IS_ERR(dev->alloc_ctx)) {
        ret = PTR_ERR(dev->alloc_ctx);
        goto rel_irq;
    }

    ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
    if (ret)
        goto alloc_ctx_cleanup;
    vfd = video_device_alloc();
    if (!vfd) {
        v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
        ret = -ENOMEM;
        goto unreg_v4l2_dev;
    }
    *vfd = g2d_videodev;
    vfd->lock = &dev->mutex;
    ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
    if (ret) {
        v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
        goto rel_vdev;
    }
    video_set_drvdata(vfd, dev);
    snprintf(vfd->name, sizeof(vfd->name), "%s", g2d_videodev.name);
    dev->vfd = vfd;
    v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
              vfd->num);
    platform_set_drvdata(pdev, dev);
    dev->m2m_dev = v4l2_m2m_init(&g2d_m2m_ops);
    if (IS_ERR(dev->m2m_dev)) {
        v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
        ret = PTR_ERR(dev->m2m_dev);
        goto unreg_video_dev;
    }

    def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;

    return 0;

unreg_video_dev:
    video_unregister_device(dev->vfd);
rel_vdev:
    video_device_release(vfd);
unreg_v4l2_dev:
    v4l2_device_unregister(&dev->v4l2_dev);
alloc_ctx_cleanup:
    vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
rel_irq:
    free_irq(dev->irq, dev);
unprep_clk_gate:
    clk_unprepare(dev->gate);
put_clk_gate:
    clk_put(dev->gate);
unprep_clk:
    clk_unprepare(dev->clk);
put_clk:
    clk_put(dev->clk);
unmap_regs:
    iounmap(dev->regs);
rel_res_regs:
    /* NOTE(review): pairs with request_mem_region(); release_mem_region()
     * would be the canonical counterpart — confirm before changing. */
    release_resource(dev->res_regs);
free_dev:
    kfree(dev);
    return ret;
}
/*
 * ftr_probe - probe one RFIC/FTR SSBI device on an FSM9900 RF board.
 *
 * The first probed instance (n_dev == 0) performs board-level bring-up:
 * regulator init keyed off the RF board id, mapping of the GRFC/WF/PDM
 * register windows, AHB/PDM2 clock enable and PDM configuration.  Every
 * instance then sets up its per-device GRFC routing, registers a misc
 * device and finally populates child OF nodes.
 *
 * All global state (n_dev, rfbid, the ioremapped bases) is serialized by
 * rficlock.  Returns 0/negative errno.
 */
static int ftr_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ftr_dev_node_info *ptr;
	struct resource *mem_res;
	struct clk *pdm_clk;
	int ret;
	u8 version = 0;

	pr_debug("%s: me = %p, parent = %p\n",
		__func__, pdev, pdev->dev.parent);

	mutex_lock(&rficlock);

	if (n_dev >= RFIC_DEVICE_NUM) {
		pr_warn("%s: Invalid devices %d\n", __func__, n_dev);
		mutex_unlock(&rficlock);
		return -EINVAL;
	}

	if (!n_dev) {
		rfbid = rf_interface_id();
		if ((rfbid != 0xff) && (rfbid != 0))
			rfbid = rfbid & RF_TYPE_48;

		switch (rfbid) {
		case RF_TYPE_16:
			ftr_regulator_init(pdev);
			break;
		case RF_TYPE_32:
			glu_regulator_init(pdev);
			break;
		case RF_TYPE_48:
			mtr_regulator_init(pdev);
			break;
		default:
			pr_warn("%s:Regulators not turned ON %d\n",
					__func__, rfbid);
		}

		rfbid = rf_interface_id();
		pr_info("%s: RF Board Id 0x%x\n", __func__, rfbid);

		mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		grfc_base = devm_ioremap_resource(&pdev->dev, mem_res);
		if (IS_ERR(grfc_base)) {
			mutex_unlock(&rficlock);
			return PTR_ERR(grfc_base);
		}

		mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		wf_base = devm_ioremap_resource(&pdev->dev, mem_res);
		if (IS_ERR(wf_base)) {
			mutex_unlock(&rficlock);
			return PTR_ERR(wf_base);
		}

		mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
		pdm_base = devm_ioremap_resource(&pdev->dev, mem_res);
		if (IS_ERR(pdm_base)) {
			mutex_unlock(&rficlock);
			return PTR_ERR(pdm_base);
		}

		ret = device_create_file(&pdev->dev, &dev_attr_rfboard_id);
		WARN_ON(ret);

		/*
		 * Clocks are enabled once at board bring-up and deliberately
		 * kept running (no matching clk_put/disable); failure to get
		 * them is logged but not fatal.
		 */
		pdm_clk = clk_get(&pdev->dev, "ahb_clk");
		if (IS_ERR(pdm_clk)) {
			pdm_clk = NULL;
			pr_err("%s: AHB CLK is  NULL\n", __func__);
		} else {
			/* FIX: print the pointer with %p; the old %x +
			 * (unsigned int) cast truncated it on 64-bit. */
			pr_debug("%s: AHB CLK is %p\n", __func__, pdm_clk);
			/* FIX: check prepare/enable results instead of
			 * silently ignoring them. */
			if (clk_prepare(pdm_clk))
				pr_err("%s: AHB clk prepare failed\n",
					__func__);
			else if (clk_enable(pdm_clk))
				pr_err("%s: AHB clk enable failed\n",
					__func__);
		}

		pdm_clk = clk_get(&pdev->dev, "pdm2_clk");
		if (IS_ERR(pdm_clk)) {
			pdm_clk = NULL;
			pr_err("%s: PDM2 CLK is  NULL\n", __func__);
		} else {
			pr_debug("%s: PDM2 CLK is %p\n", __func__, pdm_clk);
			if (clk_prepare(pdm_clk))
				pr_err("%s: PDM2 clk prepare failed\n",
					__func__);
			else if (clk_enable(pdm_clk))
				pr_err("%s: PDM2 clk enable failed\n",
					__func__);
		}

		if ((rfbid > RF_TYPE_48) && (rfbid != 0xff)) {
			fsm9900_mtr_init();
			pdm_mtr_enable();
			pr_info("%s: MTR PDM Enabled\n", __func__);
		} else if ((rfbid > RF_TYPE_16) && (rfbid < RF_TYPE_32)) {
			fsm9900_rfic_init();
			pdm_enable();
			pr_info("%s: PDM Enabled\n", __func__);
		} else if ((rfbid > RF_TYPE_32) && (rfbid < RF_TYPE_48)) {
			fsm9900_gluon_init();
			pr_info("%s: Gluon Enabled\n", __func__);
		} else {
			pr_warn("%s:PDMs not configured %d\n",
					__func__, rfbid);
		}

	}

	ptr = ftr_dev_info + n_dev;
	ptr->dev = &pdev->dev;

	/* Devices 1..7 share the parent SSBI controller; pick the arbiter
	 * mode and PVC routing that matches this slot and board type. */
	if ((n_dev >= 1)  && (n_dev <= 7)) {
		struct ssbi *ssbi =
		platform_get_drvdata(to_platform_device(pdev->dev.parent));
		if ((rfbid > RF_TYPE_48) && (n_dev <= 4)) {
			ssbi->controller_type =
				FSM_SBI_CTRL_GENI_SSBI2_ARBITER;
			set_ssbi_mode_2(ssbi->base);
			pr_debug("%s: SSBI2 = 0x%x\n", __func__,
				ssbi->controller_type);
			rfic_pvc_enable(ssbi->base, 3);
		} else {
			ssbi->controller_type =
				FSM_SBI_CTRL_GENI_SSBI_ARBITER;
			set_ssbi_mode_1(ssbi->base);
			pr_debug("%s: SSBI1 = 0x%x\n", __func__,
				ssbi->controller_type);
			if ((n_dev == 1) || (n_dev == 2))
				rfic_pvc_enable(ssbi->base, 1);
			if ((n_dev == 3) && (rfbid > RF_TYPE_16)
					&& (rfbid < RF_TYPE_32))
				rfic_pvc_enable(ssbi->base, 2);
		}
		platform_set_drvdata(to_platform_device(pdev->dev.parent),
				ssbi);
	}

	if ((rfbid > RF_TYPE_16) && (rfbid < RF_TYPE_48) && (n_dev == 1)) {
		ssbi_write(pdev->dev.parent, 0xff, &version, 1);
		ssbi_read(pdev->dev.parent, 0x2, &version, 1);
		pr_info("%s: FTR1 Version = %02x\n", __func__, version);

		ptr->grfcctrladdr = grfc_base + 0x10; /* grp 4 */
		ptr->grfcmaskaddr = grfc_base + 0x30;
		__raw_writel(0x00001800, ptr->grfcmaskaddr);
		ptr->maskvalue = 0x00001800;
		ptr->busselect[TX1_BUS] = 0x00000000;
		ptr->busselect[TX2_BUS] = 0x00001000;
		ptr->busselect[MISC_BUS] = 0x00000800;
		ptr->busselect[RX_BUS] = 0x00001800;
	} else if ((rfbid > RF_TYPE_16) && (rfbid < RF_TYPE_48) &&
		(n_dev == 2)) {
		ssbi_write(pdev->dev.parent, 0xff, &version, 1);
		ssbi_read(pdev->dev.parent, 0x2, &version, 1);
		pr_info("%s: FTR2 Version = %02x\n", __func__, version);

		ptr->grfcctrladdr = grfc_base + 0x14; /* grp 5*/
		ptr->grfcmaskaddr = grfc_base + 0x34;
		__raw_writel(0x00000600, ptr->grfcmaskaddr);
		ptr->maskvalue = 0x00000600;
		ptr->busselect[TX1_BUS] = 0x000000;
		ptr->busselect[TX2_BUS] = 0x00000400;
		ptr->busselect[MISC_BUS] = 0x00000200;
		ptr->busselect[RX_BUS] = 0x00000600;
	}

	mutex_init(&ptr->lock);

	if (rfbid < RF_TYPE_48) {
		ret = misc_register(ftr_misc_dev + n_dev);

		if (ret < 0) {
			/* FIX: registration failed, so there is nothing to
			 * deregister (the old code called misc_deregister()
			 * on an unregistered device). */
			mutex_unlock(&rficlock);
			return ret;
		}
	} else {
		ret = misc_register(mtr_misc_dev + n_dev);

		if (ret < 0) {
			mutex_unlock(&rficlock);
			return ret;
		}
	}

	n_dev++;
	pr_debug("%s: num_of_ssbi_devices = %d\n", __func__, n_dev);
	mutex_unlock(&rficlock);

	return of_platform_populate(np, NULL, NULL, &pdev->dev);
}
Beispiel #16
0
/*
 * fsl_dcu_drm_probe - probe the Freescale DCU display controller.
 *
 * Maps the register window, prepares and enables the "dcu" clock, sets up
 * the MMIO regmap, allocates a DRM device and registers it.
 *
 * FIX: the original leaked the prepared+enabled clock on every failure
 * after clk_enable() (regmap init, of_match_node, drm_dev_alloc,
 * drm_dev_register all returned directly).  All of those paths now unwind
 * through disable_clk.
 *
 * Returns 0 on success or a negative errno.
 */
static int fsl_dcu_drm_probe(struct platform_device *pdev)
{
	struct fsl_dcu_drm_device *fsl_dev;
	struct drm_device *drm;
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *base;
	struct drm_driver *driver = &fsl_dcu_drm_driver;
	const struct of_device_id *id;
	int ret;

	fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
	if (!fsl_dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "could not get memory IO resource\n");
		return -ENODEV;
	}

	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		return ret;
	}

	fsl_dev->irq = platform_get_irq(pdev, 0);
	if (fsl_dev->irq < 0) {
		dev_err(dev, "failed to get irq\n");
		return -ENXIO;
	}

	fsl_dev->clk = devm_clk_get(dev, "dcu");
	if (IS_ERR(fsl_dev->clk)) {
		ret = PTR_ERR(fsl_dev->clk);
		dev_err(dev, "failed to get dcu clock\n");
		return ret;
	}
	ret = clk_prepare(fsl_dev->clk);
	if (ret < 0) {
		dev_err(dev, "failed to prepare dcu clk\n");
		return ret;
	}
	ret = clk_enable(fsl_dev->clk);
	if (ret < 0) {
		dev_err(dev, "failed to enable dcu clk\n");
		clk_unprepare(fsl_dev->clk);
		return ret;
	}

	fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
			&fsl_dcu_regmap_config);
	if (IS_ERR(fsl_dev->regmap)) {
		dev_err(dev, "regmap init failed\n");
		ret = PTR_ERR(fsl_dev->regmap);
		goto disable_clk;
	}

	id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node);
	if (!id) {
		ret = -ENODEV;
		goto disable_clk;
	}
	fsl_dev->soc = id->data;

	drm = drm_dev_alloc(driver, dev);
	if (!drm) {
		ret = -ENOMEM;
		goto disable_clk;
	}

	fsl_dev->dev = dev;
	fsl_dev->drm = drm;
	fsl_dev->np = dev->of_node;
	drm->dev_private = fsl_dev;
	dev_set_drvdata(dev, fsl_dev);

	ret = drm_dev_register(drm, 0);
	if (ret < 0)
		goto unref;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
		 driver->major, driver->minor, driver->patchlevel,
		 driver->date, drm->primary->index);

	return 0;

unref:
	drm_dev_unref(drm);
disable_clk:
	clk_disable(fsl_dev->clk);
	clk_unprepare(fsl_dev->clk);
	return ret;
}
Beispiel #17
0
/*
 * serial_pxa_probe - probe one PXA UART port.
 *
 * Allocates the port structure, grabs and prepares the device clock,
 * fills in the uart_port description (optionally from DT), maps the
 * register window and registers the port with the serial core.
 *
 * FIX: the return value of uart_add_one_port() was previously ignored;
 * on failure the mapping, clock and allocation leaked and a stale
 * pointer stayed in serial_pxa_ports[].
 *
 * Returns 0 on success or a negative errno.
 */
static int serial_pxa_probe(struct platform_device *dev)
{
	struct uart_pxa_port *sport;
	struct resource *mmres, *irqres;
	int ret;

	mmres = platform_get_resource(dev, IORESOURCE_MEM, 0);
	irqres = platform_get_resource(dev, IORESOURCE_IRQ, 0);
	if (!mmres || !irqres)
		return -ENODEV;

	sport = kzalloc(sizeof(struct uart_pxa_port), GFP_KERNEL);
	if (!sport)
		return -ENOMEM;

	sport->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(sport->clk)) {
		ret = PTR_ERR(sport->clk);
		goto err_free;
	}

	ret = clk_prepare(sport->clk);
	if (ret) {
		clk_put(sport->clk);
		goto err_free;
	}

	sport->port.type = PORT_PXA;
	sport->port.iotype = UPIO_MEM;
	sport->port.mapbase = mmres->start;
	sport->port.irq = irqres->start;
	sport->port.fifosize = 64;
	sport->port.ops = &serial_pxa_pops;
	sport->port.dev = &dev->dev;
	sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
	sport->port.uartclk = clk_get_rate(sport->clk);

	/* >0 means no DT alias: fall back to the platform device id */
	ret = serial_pxa_probe_dt(dev, sport);
	if (ret > 0)
		sport->port.line = dev->id;
	else if (ret < 0)
		goto err_clk;
	snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1);

	sport->port.membase = ioremap(mmres->start, resource_size(mmres));
	if (!sport->port.membase) {
		ret = -ENOMEM;
		goto err_clk;
	}

	serial_pxa_ports[sport->port.line] = sport;

	ret = uart_add_one_port(&serial_pxa_reg, &sport->port);
	if (ret) {
		serial_pxa_ports[sport->port.line] = NULL;
		goto err_map;
	}
	platform_set_drvdata(dev, sport);

	return 0;

 err_map:
	iounmap(sport->port.membase);
 err_clk:
	clk_unprepare(sport->clk);
	clk_put(sport->clk);
 err_free:
	kfree(sport);
	return ret;
}
/*
 * mv_ehci_probe - probe the Marvell EHCI host controller.
 *
 * Parses DT platform data, creates the usb_hcd, acquires and prepares the
 * controller clock, maps the capability registers, grabs the USB2 PHY,
 * powers the block up and either hands the host off to an OTG transceiver
 * or registers it directly with the USB core.
 *
 * FIXes versus the original:
 *  - clk_prepare() result is now checked instead of ignored;
 *  - the prepared clock is unprepared on all subsequent error paths;
 *  - platform_get_irq() failure is detected with <= 0 (it returns a
 *    negative errno, which "!hcd->irq" missed).
 *
 * Returns 0 on success or a negative errno.
 */
static int mv_ehci_probe(struct platform_device *pdev)
{
	struct mv_usb_platform_data *pdata;
	struct device *dev = &pdev->dev;
	struct usb_hcd *hcd;
	struct ehci_hcd *ehci;
	struct ehci_hcd_mv *ehci_mv;
	struct resource *r;
	int retval = -ENODEV;
	u32 offset;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "failed to allocate memory for platform_data\n");
		return -ENODEV;
	}
	mv_ehci_dt_parse(pdev, pdata);
	/*
	 * Right now device-tree probed devices don't get dma_mask set.
	 * Since shared usb code relies on it, set it here for now.
	 * Once we have dma capability bindings this can go away.
	 */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

	if (usb_disabled())
		return -ENODEV;

	hcd = usb_create_hcd(&mv_ehci_hc_driver, &pdev->dev, "mv ehci");
	if (!hcd)
		return -ENOMEM;

	ehci_mv = devm_kzalloc(&pdev->dev, sizeof(*ehci_mv), GFP_KERNEL);
	if (ehci_mv == NULL) {
		dev_err(&pdev->dev, "cannot allocate ehci_hcd_mv\n");
		retval = -ENOMEM;
		goto err_put_hcd;
	}

	platform_set_drvdata(pdev, ehci_mv);
	ehci_mv->pdata = pdata;
	ehci_mv->hcd = hcd;

	ehci_mv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ehci_mv->clk)) {
		dev_err(&pdev->dev, "error getting clock\n");
		retval = PTR_ERR(ehci_mv->clk);
		goto err_clear_drvdata;
	}

	retval = clk_prepare(ehci_mv->clk);
	if (retval) {
		dev_err(&pdev->dev, "failed to prepare clock\n");
		goto err_clear_drvdata;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no I/O memory resource defined\n");
		retval = -ENODEV;
		goto err_unprepare_clk;
	}

	ehci_mv->cap_regs = devm_ioremap(&pdev->dev, r->start,
					 resource_size(r));
	if (ehci_mv->cap_regs == NULL) {
		dev_err(&pdev->dev, "failed to map I/O memory\n");
		retval = -EFAULT;
		goto err_unprepare_clk;
	}

	ehci_mv->phy = devm_usb_get_phy_dev(&pdev->dev, MV_USB2_PHY_INDEX);
	if (IS_ERR_OR_NULL(ehci_mv->phy)) {
		retval = PTR_ERR(ehci_mv->phy);
		if (retval != -EPROBE_DEFER && retval != -ENODEV)
			dev_err(&pdev->dev, "failed to get the outer phy\n");
		else {
			/* NOTE(review): freeing hcd internals by hand instead
			 * of usb_put_hcd() bypasses the kref — looks wrong,
			 * but preserved pending confirmation. */
			clk_unprepare(ehci_mv->clk);
			kfree(hcd->bandwidth_mutex);
			kfree(hcd);

			return -EPROBE_DEFER;
		}
		goto err_unprepare_clk;
	}

	retval = mv_ehci_enable(ehci_mv);
	if (retval) {
		dev_err(&pdev->dev, "init phy error %d\n", retval);
		goto err_unprepare_clk;
	}

	/* operational registers start CAPLENGTH bytes past the cap regs */
	offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
	ehci_mv->op_regs =
		(void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);

	hcd->rsrc_start = r->start;
	hcd->rsrc_len = resource_size(r);
	hcd->regs = ehci_mv->op_regs;

	hcd->irq = platform_get_irq(pdev, 0);
	if (hcd->irq <= 0) {
		dev_err(&pdev->dev, "Cannot get irq.");
		retval = hcd->irq ? hcd->irq : -ENODEV;
		goto err_disable_clk;
	}

	ehci = hcd_to_ehci(hcd);
	ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;

	ehci_mv->mode = pdata->mode;
	if (ehci_mv->mode == MV_USB_MODE_OTG) {
		ehci_mv->otg = devm_usb_get_phy_dev(&pdev->dev,
						MV_USB2_OTG_PHY_INDEX);
		if (IS_ERR(ehci_mv->otg)) {
			retval = PTR_ERR(ehci_mv->otg);

			if (retval == -ENXIO)
				dev_info(&pdev->dev, "MV_USB_MODE_OTG "
						"must have CONFIG_USB_PHY enabled\n");
			else if (retval != -EPROBE_DEFER)
				dev_err(&pdev->dev,
						"unable to find transceiver\n");
			goto err_disable_clk;
		}

		retval = otg_set_host(ehci_mv->otg->otg, &hcd->self);
		if (retval < 0) {
			dev_err(&pdev->dev,
				"unable to register with transceiver\n");
			retval = -ENODEV;
			goto err_disable_clk;
		}
		/* otg will enable clock before use as host */
		mv_ehci_disable(ehci_mv);
	} else {
		pxa_usb_extern_call(pdata->id, vbus, set_vbus, 1);

		retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
		if (retval) {
			dev_err(&pdev->dev,
				"failed to add hcd with err %d\n", retval);
			goto err_set_vbus;
		}
	}

	dev_info(&pdev->dev,
		 "successful find EHCI device with regs 0x%p irq %d"
		 " working in %s mode\n", hcd->regs, hcd->irq,
		 ehci_mv->mode == MV_USB_MODE_OTG ? "OTG" : "Host");

	return 0;

err_set_vbus:
	pxa_usb_extern_call(pdata->id, vbus, set_vbus, 0);
err_disable_clk:
	mv_ehci_disable(ehci_mv);
err_unprepare_clk:
	clk_unprepare(ehci_mv->clk);
err_clear_drvdata:
	platform_set_drvdata(pdev, NULL);
err_put_hcd:
	usb_put_hcd(hcd);

	return retval;
}
Beispiel #19
0
/*
 * sh_cmt_setup - one-time setup of a single SuperH CMT timer channel.
 *
 * Validates platform data and resources, maps the channel register window
 * plus the (possibly shared) CMSTR start/stop register, selects 16- vs
 * 32-bit register accessors based on resource sizes, acquires and prepares
 * the "cmt_fck" clock, registers the channel as clockevent/clocksource and
 * finally installs the interrupt via setup_irq().
 *
 * Returns 0 on success or a negative errno; acquired resources are
 * released in reverse order through the err labels.
 */
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res, *res2;
	int irq, ret;
	ret = -ENXIO;	/* default errno for the early resource-lookup failures */

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	/* optional resource for the shared timer start/stop register */
	res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1);

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* map second resource for CMSTR */
	p->mapbase_str = ioremap_nocache(res2 ? res2->start :
					 res->start - cfg->channel_offset,
					 res2 ? resource_size(res2) : 2);
	if (p->mapbase_str == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O second memory\n");
		goto err1;
	}

	/* request irq using setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_cmt_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "cmt_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err2;
	}

	ret = clk_prepare(p->clk);
	if (ret < 0)
		goto err3;

	/* control-register width is inferred from the CMSTR resource size */
	if (res2 && (resource_size(res2) == 4)) {
		/* assume both CMSTR and CMCSR to be 32-bit */
		p->read_control = sh_cmt_read32;
		p->write_control = sh_cmt_write32;
	} else {
		p->read_control = sh_cmt_read16;
		p->write_control = sh_cmt_write16;
	}

	/* a 6-byte channel window implies a 16-bit counter, else 32-bit */
	if (resource_size(res) == 6) {
		p->width = 16;
		p->read_count = sh_cmt_read16;
		p->write_count = sh_cmt_write16;
		p->overflow_bit = 0x80;
		p->clear_bits = ~0x80;
	} else {
		p->width = 32;
		p->read_count = sh_cmt_read32;
		p->write_count = sh_cmt_write32;
		p->overflow_bit = 0x8000;
		p->clear_bits = ~0xc000;
	}

	/* largest match value representable at the counter width */
	if (p->width == (sizeof(p->max_match_value) * 8))
		p->max_match_value = ~0;
	else
		p->max_match_value = (1 << p->width) - 1;

	p->match_value = p->max_match_value;
	raw_spin_lock_init(&p->lock);

	ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret) {
		dev_err(&p->pdev->dev, "registration failed\n");
		goto err4;
	}
	p->cs_enabled = false;

	ret = setup_irq(irq, &p->irqaction);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
		goto err4;
	}

	platform_set_drvdata(pdev, p);

	return 0;
	/* unwind in strict reverse order of acquisition */
err4:
	clk_unprepare(p->clk);
err3:
	clk_put(p->clk);
err2:
	iounmap(p->mapbase_str);
err1:
	iounmap(p->mapbase);
err0:
	return ret;
}
Beispiel #20
0
/*
 * omap_usb2_probe - probe the OMAP USB2 PHY.
 *
 * Matches the SoC-specific phy data, wires up the usb_otg/usb_phy
 * structures, resolves the control module via the "ctrl-module" phandle,
 * creates the generic PHY, registers the PHY provider, prepares the
 * wakeup and optional reference clocks and adds the PHY to the USB core.
 *
 * FIXes versus the original:
 *  - of_node_put() on the control_node phandle (refcount leak);
 *  - the old-name fallback "usb_otg_ss_refclk960m" clock was found but
 *    never clk_prepare()d;
 *  - clk_prepare(wkupclk) and usb_add_phy_dev() results are checked and
 *    unwound instead of ignored.
 *
 * Returns 0 on success or a negative errno.
 */
static int omap_usb2_probe(struct platform_device *pdev)
{
	struct omap_usb	*phy;
	struct phy *generic_phy;
	struct resource *res;
	struct phy_provider *phy_provider;
	struct usb_otg *otg;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *control_node;
	struct platform_device *control_pdev;
	const struct of_device_id *of_id;
	struct usb_phy_data *phy_data;
	int ret;

	of_id = of_match_device(of_match_ptr(omap_usb2_id_table), &pdev->dev);

	if (!of_id)
		return -EINVAL;

	phy_data = (struct usb_phy_data *)of_id->data;

	phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
	if (!phy) {
		dev_err(&pdev->dev, "unable to allocate memory for USB2 PHY\n");
		return -ENOMEM;
	}

	otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
	if (!otg) {
		dev_err(&pdev->dev, "unable to allocate memory for USB OTG\n");
		return -ENOMEM;
	}

	phy->dev		= &pdev->dev;

	phy->phy.dev		= phy->dev;
	phy->phy.label		= phy_data->label;
	phy->phy.otg		= otg;
	phy->phy.type		= USB_PHY_TYPE_USB2;

	if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(phy->phy_base))
			return PTR_ERR(phy->phy_base);
		phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
	}

	control_node = of_parse_phandle(node, "ctrl-module", 0);
	if (!control_node) {
		dev_err(&pdev->dev, "Failed to get control device phandle\n");
		return -EINVAL;
	}

	control_pdev = of_find_device_by_node(control_node);
	of_node_put(control_node);	/* drop the of_parse_phandle() ref */
	if (!control_pdev) {
		dev_err(&pdev->dev, "Failed to get control device\n");
		return -EINVAL;
	}

	phy->control_dev = &control_pdev->dev;
	omap_control_phy_power(phy->control_dev, 0);

	otg->set_host		= omap_usb_set_host;
	otg->set_peripheral	= omap_usb_set_peripheral;
	if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS)
		otg->set_vbus		= omap_usb_set_vbus;
	if (phy_data->flags & OMAP_USB2_HAS_START_SRP)
		otg->start_srp		= omap_usb_start_srp;
	otg->phy		= &phy->phy;

	platform_set_drvdata(pdev, phy);

	generic_phy = devm_phy_create(phy->dev, &ops, NULL);
	if (IS_ERR(generic_phy))
		return PTR_ERR(generic_phy);

	phy_set_drvdata(generic_phy, phy);

	pm_runtime_enable(phy->dev);
	phy_provider = devm_of_phy_provider_register(phy->dev,
			of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		pm_runtime_disable(phy->dev);
		return PTR_ERR(phy_provider);
	}

	phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
	if (IS_ERR(phy->wkupclk)) {
		dev_warn(&pdev->dev, "unable to get wkupclk, trying old name\n");
		phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
		if (IS_ERR(phy->wkupclk)) {
			dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
			pm_runtime_disable(phy->dev);
			return PTR_ERR(phy->wkupclk);
		} else {
			dev_warn(&pdev->dev,
				 "found usb_phy_cm_clk32k, please fix DTS\n");
		}
	}
	ret = clk_prepare(phy->wkupclk);
	if (ret) {
		dev_err(&pdev->dev, "failed to prepare wkupclk\n");
		pm_runtime_disable(phy->dev);
		return ret;
	}

	/* the reference clock is optional: probe both the new and old name */
	phy->optclk = devm_clk_get(phy->dev, "refclk");
	if (IS_ERR(phy->optclk)) {
		dev_dbg(&pdev->dev, "unable to get refclk, trying old name\n");
		phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m");
		if (IS_ERR(phy->optclk)) {
			dev_dbg(&pdev->dev,
				"unable to get usb_otg_ss_refclk960m\n");
		} else {
			dev_warn(&pdev->dev,
				 "found usb_otg_ss_refclk960m, please fix DTS\n");
		}
	}
	if (!IS_ERR(phy->optclk))
		clk_prepare(phy->optclk);

	ret = usb_add_phy_dev(&phy->phy);
	if (ret) {
		clk_unprepare(phy->wkupclk);
		if (!IS_ERR(phy->optclk))
			clk_unprepare(phy->optclk);
		pm_runtime_disable(phy->dev);
		return ret;
	}

	return 0;
}
Beispiel #21
0
/*
 * st_rproc_parse_dt - parse DT resources for the ST remote processor.
 *
 * Acquires the optional S/W and power reset lines, the coprocessor clock
 * and its target rate, the syscfg-based boot-address register, the shared
 * reserved-memory region, and finally prepares the clock.
 *
 * FIXes versus the original: the clk_prepare() failure message wrongly
 * said "failed to get clock", and the already-initialized reserved-memory
 * region was not released on that failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int st_rproc_parse_dt(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rproc *rproc = platform_get_drvdata(pdev);
	struct st_rproc *ddata = rproc->priv;
	struct device_node *np = dev->of_node;
	int err;

	if (ddata->config->sw_reset) {
		ddata->sw_reset = devm_reset_control_get(dev, "sw_reset");
		if (IS_ERR(ddata->sw_reset)) {
			dev_err(dev, "Failed to get S/W Reset\n");
			return PTR_ERR(ddata->sw_reset);
		}
	}

	if (ddata->config->pwr_reset) {
		ddata->pwr_reset = devm_reset_control_get(dev, "pwr_reset");
		if (IS_ERR(ddata->pwr_reset)) {
			dev_err(dev, "Failed to get Power Reset\n");
			return PTR_ERR(ddata->pwr_reset);
		}
	}

	ddata->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ddata->clk)) {
		dev_err(dev, "Failed to get clock\n");
		return PTR_ERR(ddata->clk);
	}

	err = of_property_read_u32(np, "clock-frequency", &ddata->clk_rate);
	if (err) {
		dev_err(dev, "failed to get clock frequency\n");
		return err;
	}

	ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
	if (IS_ERR(ddata->boot_base)) {
		dev_err(dev, "Boot base not found\n");
		return PTR_ERR(ddata->boot_base);
	}

	err = of_property_read_u32_index(np, "st,syscfg", 1,
					 &ddata->boot_offset);
	if (err) {
		dev_err(dev, "Boot offset not found\n");
		return -EINVAL;
	}

	err = of_reserved_mem_device_init(dev);
	if (err) {
		dev_err(dev, "Failed to obtain shared memory\n");
		return err;
	}

	err = clk_prepare(ddata->clk);
	if (err) {
		dev_err(dev, "failed to prepare clock\n");
		of_reserved_mem_device_release(dev);
	}

	return err;
}
Beispiel #22
0
Datei: g2d.c Projekt: 7799/linux
static int g2d_probe(struct platform_device *pdev)
{
	struct g2d_dev *dev;
	struct video_device *vfd;
	struct resource *res;
	const struct of_device_id *of_id;
	int ret = 0;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->ctrl_lock);
	mutex_init(&dev->mutex);
	atomic_set(&dev->num_inst, 0);
	init_waitqueue_head(&dev->irq_queue);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	dev->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs))
		return PTR_ERR(dev->regs);

	dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
	if (IS_ERR(dev->clk)) {
		dev_err(&pdev->dev, "failed to get g2d clock\n");
		return -ENXIO;
	}

	ret = clk_prepare(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to prepare g2d clock\n");
		goto put_clk;
	}

	dev->gate = clk_get(&pdev->dev, "fimg2d");
	if (IS_ERR(dev->gate)) {
		dev_err(&pdev->dev, "failed to get g2d clock gate\n");
		ret = -ENXIO;
		goto unprep_clk;
	}

	ret = clk_prepare(dev->gate);
	if (ret) {
		dev_err(&pdev->dev, "failed to prepare g2d clock gate\n");
		goto put_clk_gate;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to find IRQ\n");
		ret = -ENXIO;
		goto unprep_clk_gate;
	}

	dev->irq = res->start;

	ret = devm_request_irq(&pdev->dev, dev->irq, g2d_isr,
						0, pdev->name, dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to install IRQ\n");
		goto put_clk_gate;
	}

	dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
	if (IS_ERR(dev->alloc_ctx)) {
		ret = PTR_ERR(dev->alloc_ctx);
		goto unprep_clk_gate;
	}

	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret)
		goto alloc_ctx_cleanup;
	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
		ret = -ENOMEM;
		goto unreg_v4l2_dev;
	}
	*vfd = g2d_videodev;
	vfd->lock = &dev->mutex;
	vfd->v4l2_dev = &dev->v4l2_dev;
	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
	if (ret) {
		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
		goto rel_vdev;
	}
	video_set_drvdata(vfd, dev);
	snprintf(vfd->name, sizeof(vfd->name), "%s", g2d_videodev.name);
	dev->vfd = vfd;
	v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
								vfd->num);
	platform_set_drvdata(pdev, dev);
	dev->m2m_dev = v4l2_m2m_init(&g2d_m2m_ops);
	if (IS_ERR(dev->m2m_dev)) {
		v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(dev->m2m_dev);
		goto unreg_video_dev;
	}

	def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;

	if (!pdev->dev.of_node) {
		dev->variant = g2d_get_drv_data(pdev);
	} else {
		of_id = of_match_node(exynos_g2d_match, pdev->dev.of_node);
		if (!of_id) {
			ret = -ENODEV;
			goto unreg_video_dev;
		}
		dev->variant = (struct g2d_variant *)of_id->data;
	}

	return 0;

unreg_video_dev:
	video_unregister_device(dev->vfd);
rel_vdev:
	video_device_release(vfd);
unreg_v4l2_dev:
	v4l2_device_unregister(&dev->v4l2_dev);
alloc_ctx_cleanup:
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
unprep_clk_gate:
	clk_unprepare(dev->gate);
put_clk_gate:
	clk_put(dev->gate);
unprep_clk:
	clk_unprepare(dev->clk);
put_clk:
	clk_put(dev->clk);

	return ret;
}
Beispiel #23
0
static int nuc970_pwm_probe(struct platform_device *pdev)
{

	struct nuc970_chip *nuc970;
	struct pinctrl *p;
	int ret;

	nuc970 = devm_kzalloc(&pdev->dev, sizeof(*nuc970), GFP_KERNEL);
	if (nuc970 == NULL) {
		dev_err(&pdev->dev, "failed to allocate memory for pwm_device\n");
		return -ENOMEM;
	}
	/* calculate base of control bits in TCON */

	nuc970->chip.dev = &pdev->dev;
	nuc970->chip.ops = &nuc970_pwm_ops;
	//nuc970->chip.of_xlate = of_pwm_xlate_with_flags;
	//nuc970->chip.of_pwm_n_cells = 3;
	nuc970->chip.base = pdev->id;
	nuc970->chip.npwm = 1;

	nuc970->clk = clk_get(NULL, "pwm");
	if (IS_ERR(nuc970->clk)) {
		dev_err(&pdev->dev, "failed to get pwm clock\n");
		ret = PTR_ERR(nuc970->clk);
		return ret;
	}

	clk_prepare(nuc970->clk);
	clk_enable(nuc970->clk);
	// all channel prescale output div by 1
	__raw_writel(0x4444, REG_PWM_CSR);

	if(pdev->id == 0) {
#if defined (CONFIG_NUC970_PWM0_PA12)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm0-PA");
#elif defined (CONFIG_NUC970_PWM0_PB2)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm0-PB");
#elif defined (CONFIG_NUC970_PWM0_PC14)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm0-PC");
#elif defined (CONFIG_NUC970_PWM0_PD12)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm0-PD");
#endif

#ifndef CONFIG_NUC970_PWM0_NONE
		if(IS_ERR(p)) {
			dev_err(&pdev->dev, "unable to reserve output pin\n");

		}
#endif
	}
	if(pdev->id == 1) {
#if defined (CONFIG_NUC970_PWM1_PA13)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm1-PA");
#elif defined (CONFIG_NUC970_PWM1_PB3)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm1-PB");
#elif defined (CONFIG_NUC970_PWM1_PD13)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm1-PD");
#endif

#ifndef CONFIG_NUC970_PWM1_NONE
		if(IS_ERR(p)) {
			dev_err(&pdev->dev, "unable to reserve output pin\n");
		}
#endif
	}
	if(pdev->id == 2) {
#if defined (CONFIG_NUC970_PWM2_PA14)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm2-PA");
#elif defined (CONFIG_NUC970_PWM2_PH2)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm2-PH");
#elif defined (CONFIG_NUC970_PWM2_PD14)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm2-PD");
#endif

#ifndef CONFIG_NUC970_PWM2_NONE
		if(IS_ERR(p)) {
			dev_err(&pdev->dev, "unable to reserve output pin\n");
		}
#endif
	}
	if(pdev->id == 3) {
#if defined (CONFIG_NUC970_PWM3_PA15)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm3-PA");
#elif defined (CONFIG_NUC970_PWM3_PH3)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm3-PH");
#elif defined (CONFIG_NUC970_PWM3_PD15)
		p = devm_pinctrl_get_select(&pdev->dev, "pwm3-PD");
#endif

#ifndef CONFIG_NUC970_PWM3_NONE
		if(IS_ERR(p)) {
			dev_err(&pdev->dev, "unable to reserve output pin\n");
		}
#endif
	}

	ret = pwmchip_add(&nuc970->chip);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register pwm\n");
		goto err;
	}

	platform_set_drvdata(pdev, nuc970);

	return 0;

err:
	//clk_disable(nuc970->clk);
	return ret;
}
Beispiel #24
0
/*
 * bdisp_probe - probe one BDISP instance.
 *
 * Allocates the device context, maps registers, prepares the functional
 * clock, installs the IRQ handler, registers the v4l2 device, creates
 * debugfs entries, enables runtime PM, allocates the HW filters and
 * finally registers the video device.  On failure, everything acquired
 * so far is released in reverse order and a negative errno is returned.
 *
 * Fixes vs. original:
 *  - missing IRQ resource used to "goto err_clk" with ret still 0 (the
 *    clk_prepare() result), so probe reported success on failure;
 *  - create_workqueue() result was never checked.
 */
static int bdisp_probe(struct platform_device *pdev)
{
	struct bdisp_dev *bdisp;
	struct resource *res;
	struct device *dev = &pdev->dev;
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	bdisp = devm_kzalloc(dev, sizeof(struct bdisp_dev), GFP_KERNEL);
	if (!bdisp)
		return -ENOMEM;

	bdisp->pdev = pdev;
	bdisp->dev = dev;
	platform_set_drvdata(pdev, bdisp);

	if (dev->of_node)
		bdisp->id = of_alias_get_id(pdev->dev.of_node, BDISP_NAME);
	else
		bdisp->id = pdev->id;

	init_waitqueue_head(&bdisp->irq_queue);
	INIT_DELAYED_WORK(&bdisp->timeout_work, bdisp_irq_timeout);
	bdisp->work_queue = create_workqueue(BDISP_NAME);
	if (!bdisp->work_queue) {
		/* was unchecked: a NULL queue would oops on first queue_work */
		dev_err(dev, "failed to create workqueue\n");
		return -ENOMEM;
	}
	/* NOTE(review): work_queue is not destroyed on the later error
	 * paths (same as the original) — presumably handled in remove();
	 * confirm against the rest of the driver. */

	spin_lock_init(&bdisp->slock);
	mutex_init(&bdisp->lock);

	/* get resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bdisp->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(bdisp->regs)) {
		dev_err(dev, "failed to get regs\n");
		return PTR_ERR(bdisp->regs);
	}

	bdisp->clock = devm_clk_get(dev, BDISP_NAME);
	if (IS_ERR(bdisp->clock)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(bdisp->clock);
	}

	ret = clk_prepare(bdisp->clock);
	if (ret < 0) {
		dev_err(dev, "clock prepare failed\n");
		/* poison the handle so err_clk skips clk_unprepare */
		bdisp->clock = ERR_PTR(-EINVAL);
		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "failed to get IRQ resource\n");
		ret = -EINVAL;	/* was missing: probe returned 0 here */
		goto err_clk;
	}

	ret = devm_request_threaded_irq(dev, res->start, bdisp_irq_handler,
					bdisp_irq_thread, IRQF_ONESHOT,
					pdev->name, bdisp);
	if (ret) {
		dev_err(dev, "failed to install irq\n");
		goto err_clk;
	}

	/* v4l2 register */
	ret = v4l2_device_register(dev, &bdisp->v4l2_dev);
	if (ret) {
		dev_err(dev, "failed to register\n");
		goto err_clk;
	}

	/* Debug */
	ret = bdisp_debugfs_create(bdisp);
	if (ret) {
		dev_err(dev, "failed to create debugfs\n");
		goto err_v4l2;
	}

	/* Power management */
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "failed to set PM\n");
		goto err_dbg;
	}

	/* Filters */
	if (bdisp_hw_alloc_filters(bdisp->dev)) {
		dev_err(bdisp->dev, "no memory for filters\n");
		ret = -ENOMEM;
		goto err_pm;
	}

	/* Register */
	ret = bdisp_register_device(bdisp);
	if (ret) {
		dev_err(dev, "failed to register\n");
		goto err_filter;
	}

	dev_info(dev, "%s%d registered as /dev/video%d\n", BDISP_NAME,
		 bdisp->id, bdisp->vdev.num);

	pm_runtime_put(dev);

	return 0;

err_filter:
	bdisp_hw_free_filters(bdisp->dev);
err_pm:
	pm_runtime_put(dev);
err_dbg:
	bdisp_debugfs_remove(bdisp);
err_v4l2:
	v4l2_device_unregister(&bdisp->v4l2_dev);
err_clk:
	if (!IS_ERR(bdisp->clock))
		clk_unprepare(bdisp->clock);

	return ret;
}
/*
 * _SetClock - gate the Vivante GPU core clocks on or off.
 *
 * Platform: platform context whose priv holds the i.MX clock handles.
 * GPU:      which core to gate (gcvCORE_MAJOR = 3D, gcvCORE_2D, gcvCORE_VG).
 * Enable:   nonzero to turn the clocks on, zero to turn them off.
 *
 * Two code paths, selected at compile time:
 *  - kernels < 3.5 use the legacy clk_enable()/clk_disable() API;
 *  - kernels >= 3.5 use the common clock framework, where each clock
 *    must be clk_prepare()d before clk_enable() and clk_unprepare()d
 *    after clk_disable().
 *
 * Return values of all clk calls are ignored; the function always
 * returns gcvSTATUS_OK.
 */
gceSTATUS
_SetClock(
    IN gckPLATFORM Platform,
    IN gceCORE GPU,
    IN gctBOOL Enable
    )
{
    struct imx_priv* priv = Platform->priv;
    struct clk *clk_3dcore = priv->clk_3d_core;
    struct clk *clk_3dshader = priv->clk_3d_shader;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
    /* the 3D AXI clock handle only exists in the CCF-era priv struct */
    struct clk *clk_3d_axi = priv->clk_3d_axi;
#endif
    struct clk *clk_2dcore = priv->clk_2d_core;
    struct clk *clk_2d_axi = priv->clk_2d_axi;
    struct clk *clk_vg_axi = priv->clk_vg_axi;


#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
    if (Enable) {
        switch (GPU) {
        case gcvCORE_MAJOR:
            clk_enable(clk_3dcore);
            /* the 3D shader clock is only present on mx6q silicon */
            if (cpu_is_mx6q())
                clk_enable(clk_3dshader);
            break;
        case gcvCORE_2D:
            clk_enable(clk_2dcore);
            clk_enable(clk_2d_axi);
            break;
        case gcvCORE_VG:
            /* the VG core shares the 2D core clock and adds its own AXI clock */
            clk_enable(clk_2dcore);
            clk_enable(clk_vg_axi);
            break;
        default:
            break;
        }
    } else {
        /* disable in the reverse order of the enable path */
        switch (GPU) {
        case gcvCORE_MAJOR:
            if (cpu_is_mx6q())
                clk_disable(clk_3dshader);
            clk_disable(clk_3dcore);
            break;
       case gcvCORE_2D:
            clk_disable(clk_2dcore);
            clk_disable(clk_2d_axi);
            break;
        case gcvCORE_VG:
            clk_disable(clk_2dcore);
            clk_disable(clk_vg_axi);
            break;
        default:
            break;
        }
    }
#else
    if (Enable) {
        switch (GPU) {
        case gcvCORE_MAJOR:
            /* CCF: each clock must be prepared before it can be enabled */
            clk_prepare(clk_3dcore);
            clk_enable(clk_3dcore);
            clk_prepare(clk_3dshader);
            clk_enable(clk_3dshader);
            clk_prepare(clk_3d_axi);
            clk_enable(clk_3d_axi);
            break;
        case gcvCORE_2D:
            clk_prepare(clk_2dcore);
            clk_enable(clk_2dcore);
            clk_prepare(clk_2d_axi);
            clk_enable(clk_2d_axi);
            break;
        case gcvCORE_VG:
            /* the VG core shares the 2D core clock and adds its own AXI clock */
            clk_prepare(clk_2dcore);
            clk_enable(clk_2dcore);
            clk_prepare(clk_vg_axi);
            clk_enable(clk_vg_axi);
            break;
        default:
            break;
        }
    } else {
        /* CCF teardown: disable, then unprepare, per clock */
        switch (GPU) {
        case gcvCORE_MAJOR:
            clk_disable(clk_3dshader);
            clk_unprepare(clk_3dshader);
            clk_disable(clk_3dcore);
            clk_unprepare(clk_3dcore);
            clk_disable(clk_3d_axi);
            clk_unprepare(clk_3d_axi);
            break;
       case gcvCORE_2D:
            clk_disable(clk_2dcore);
            clk_unprepare(clk_2dcore);
            clk_disable(clk_2d_axi);
            clk_unprepare(clk_2d_axi);
            break;
        case gcvCORE_VG:
            clk_disable(clk_2dcore);
            clk_unprepare(clk_2dcore);
            clk_disable(clk_vg_axi);
            clk_unprepare(clk_vg_axi);
            break;
        default:
            break;
        }
    }
#endif

    return gcvSTATUS_OK;
}
Beispiel #26
0
/*
 * mdss_dsi_prepare_clocks - prepare the three DSI controller clocks.
 *
 * Prepares the byte, escape and pixel clocks in that order.  The
 * clk_prepare() return values are not checked.
 */
void mdss_dsi_prepare_clocks(struct mdss_dsi_ctrl_pdata  *ctrl_pdata)
{
	struct clk *link_clks[3];
	int i;

	link_clks[0] = ctrl_pdata->byte_clk;
	link_clks[1] = ctrl_pdata->esc_clk;
	link_clks[2] = ctrl_pdata->pixel_clk;

	for (i = 0; i < 3; i++)
		clk_prepare(link_clks[i]);
}
Beispiel #27
0
static int nop_usb_xceiv_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct nop_usb_xceiv_platform_data *pdata = pdev->dev.platform_data;
	struct nop_usb_xceiv	*nop;
	enum usb_phy_type	type = USB_PHY_TYPE_USB2;
	int err;
	u32 clk_rate = 0;
	bool needs_vcc = false;
	bool needs_reset = false;

	nop = devm_kzalloc(&pdev->dev, sizeof(*nop), GFP_KERNEL);
	if (!nop)
		return -ENOMEM;

	nop->phy.otg = devm_kzalloc(&pdev->dev, sizeof(*nop->phy.otg),
							GFP_KERNEL);
	if (!nop->phy.otg)
		return -ENOMEM;

	if (dev->of_node) {
		struct device_node *node = dev->of_node;

		if (of_property_read_u32(node, "clock-frequency", &clk_rate))
			clk_rate = 0;

		needs_vcc = of_property_read_bool(node, "vcc-supply");
		needs_reset = of_property_read_bool(node, "reset-supply");

	} else if (pdata) {
		type = pdata->type;
		clk_rate = pdata->clk_rate;
		needs_vcc = pdata->needs_vcc;
		needs_reset = pdata->needs_reset;
	}

	nop->clk = devm_clk_get(&pdev->dev, "main_clk");
	if (IS_ERR(nop->clk)) {
		dev_dbg(&pdev->dev, "Can't get phy clock: %ld\n",
					PTR_ERR(nop->clk));
	}

	if (!IS_ERR(nop->clk) && clk_rate) {
		err = clk_set_rate(nop->clk, clk_rate);
		if (err) {
			dev_err(&pdev->dev, "Error setting clock rate\n");
			return err;
		}
	}

	if (!IS_ERR(nop->clk)) {
		err = clk_prepare(nop->clk);
		if (err) {
			dev_err(&pdev->dev, "Error preparing clock\n");
			return err;
		}
	}

	nop->vcc = devm_regulator_get(&pdev->dev, "vcc");
	if (IS_ERR(nop->vcc)) {
		dev_dbg(&pdev->dev, "Error getting vcc regulator: %ld\n",
					PTR_ERR(nop->vcc));
		if (needs_vcc)
			return -EPROBE_DEFER;
	}

	nop->reset = devm_regulator_get(&pdev->dev, "reset");
	if (IS_ERR(nop->reset)) {
		dev_dbg(&pdev->dev, "Error getting reset regulator: %ld\n",
					PTR_ERR(nop->reset));
		if (needs_reset)
			return -EPROBE_DEFER;
	}

	nop->dev		= &pdev->dev;
	nop->phy.dev		= nop->dev;
	nop->phy.label		= "nop-xceiv";
	nop->phy.set_suspend	= nop_set_suspend;
	nop->phy.init		= nop_init;
	nop->phy.shutdown	= nop_shutdown;
	nop->phy.state		= OTG_STATE_UNDEFINED;
	nop->phy.type		= type;

	nop->phy.otg->phy		= &nop->phy;
	nop->phy.otg->set_host		= nop_set_host;
	nop->phy.otg->set_peripheral	= nop_set_peripheral;

	err = usb_add_phy_dev(&nop->phy);
	if (err) {
		dev_err(&pdev->dev, "can't register transceiver, err: %d\n",
			err);
		goto err_add;
	}

	platform_set_drvdata(pdev, nop);

	ATOMIC_INIT_NOTIFIER_HEAD(&nop->phy.notifier);

	return 0;

err_add:
	if (!IS_ERR(nop->clk))
		clk_unprepare(nop->clk);
	return err;
}
Beispiel #28
0
/*
 * sh_tmu_setup - initialize one TMU device instance.
 *
 * Reads the configuration from DT (when available) or platform data,
 * acquires and prepares the "fck" clock, maps the I/O memory and sets
 * up one channel structure per hardware channel — the first channel is
 * used as a clock event device, the second as a clock source.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released (channels, mapping, clock).
 */
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/*
	 * Allocate and setup the channels.  kcalloc() checks the
	 * num_channels * sizeof multiplication for overflow (was an
	 * open-coded kzalloc(sizeof(*x) * n)).
	 */
	tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
/*
 * vcap_clk_powerup - acquire, prepare and enable the VCAP clocks.
 *
 * Brings up core_clk (rate set to 160 MHz), vcap_npl_clk and iface_clk
 * in order.  On any failure every previously enabled clock is disabled,
 * unprepared and released, the cached handles are reset to NULL, and
 * -EINVAL is returned.  Returns 0 on success.
 *
 * Fixes vs. original:
 *  - clk_set_rate()'s return value was dropped, so the following
 *    "if (ret)" re-tested the stale clk_enable() result and set_rate
 *    failures went unnoticed;
 *  - clk_get() failures nulled the handle and then called clk_put(NULL),
 *    which is a no-op on an ERR_PTR anyway — the dead calls are removed.
 */
int vcap_clk_powerup(struct vcap_dev *dev, struct device *ddev)
{
	int ret = 0;

	dev->vcap_clk = clk_get(ddev, "core_clk");
	if (IS_ERR(dev->vcap_clk)) {
		/* clk_get() failed: there is no handle to clk_put() */
		pr_err("%s: Could not clk_get core_clk\n", __func__);
		dev->vcap_clk = NULL;
		return -EINVAL;
	}

	clk_prepare(dev->vcap_clk);
	ret = clk_enable(dev->vcap_clk);
	if (ret) {
		pr_err("%s: Failed core clk_enable %d\n", __func__, ret);
		goto fail_vcap_clk_unprep;
	}

	ret = clk_set_rate(dev->vcap_clk, 160000000);
	if (ret) {
		pr_err("%s: Failed core set_rate %d\n", __func__, ret);
		goto fail_vcap_clk;
	}

	dev->vcap_npl_clk = clk_get(ddev, "vcap_npl_clk");
	if (IS_ERR(dev->vcap_npl_clk)) {
		pr_err("%s: Could not clk_get npl\n", __func__);
		dev->vcap_npl_clk = NULL;
		goto fail_vcap_clk;
	}

	clk_prepare(dev->vcap_npl_clk);
	ret = clk_enable(dev->vcap_npl_clk);
	if (ret) {
		pr_err("%s:Failed npl clk_enable %d\n", __func__, ret);
		goto fail_vcap_npl_clk_unprep;
	}

	dev->vcap_p_clk = clk_get(ddev, "iface_clk");
	if (IS_ERR(dev->vcap_p_clk)) {
		pr_err("%s: Could not clk_get pix(AHB)\n", __func__);
		dev->vcap_p_clk = NULL;
		goto fail_vcap_npl_clk;
	}

	clk_prepare(dev->vcap_p_clk);
	ret = clk_enable(dev->vcap_p_clk);
	if (ret) {
		pr_err("%s: Failed pix(AHB) clk_enable %d\n", __func__, ret);
		goto fail_vcap_p_clk_unprep;
	}
	return 0;

	/* unwind labels mirror the bring-up order, most recent first */
fail_vcap_p_clk_unprep:
	clk_unprepare(dev->vcap_p_clk);
	clk_put(dev->vcap_p_clk);
	dev->vcap_p_clk = NULL;

fail_vcap_npl_clk:
	clk_disable(dev->vcap_npl_clk);
fail_vcap_npl_clk_unprep:
	clk_unprepare(dev->vcap_npl_clk);
	clk_put(dev->vcap_npl_clk);
	dev->vcap_npl_clk = NULL;

fail_vcap_clk:
	clk_disable(dev->vcap_clk);
fail_vcap_clk_unprep:
	clk_unprepare(dev->vcap_clk);
	clk_put(dev->vcap_clk);
	dev->vcap_clk = NULL;
	return -EINVAL;
}
/*
 * acpuclk_cortex_init - boot-time CPU clock driver initialization.
 *
 * Registers the bus bandwidth client, acquires and prepares all source
 * clocks, ramps the CPU to the fastest scalable frequency, enables the
 * CPU/memory regulators and registers the acpuclk driver and cpufreq
 * table.  Any bus/clock acquisition failure is fatal (BUG/BUG_ON);
 * regulator failures unwind and return a negative errno.
 */
int __init acpuclk_cortex_init(struct platform_device *pdev,
	struct acpuclk_drv_data *data)
{
	unsigned long max_cpu_khz = 0;
	int i, rc;

	acpuclk_init_data = data;
	mutex_init(&acpuclk_init_data->lock);

	bus_perf_client = msm_bus_scale_register_client(
		acpuclk_init_data->bus_scale);
	if (!bus_perf_client) {
		pr_err("Unable to register bus client\n");
		BUG();
	}

	for (i = 0; i < NUM_SRC; i++) {
		/* entries with no name are unused source slots */
		if (!acpuclk_init_data->src_clocks[i].name)
			continue;
		acpuclk_init_data->src_clocks[i].clk =
			clk_get(&pdev->dev,
				acpuclk_init_data->src_clocks[i].name);
		BUG_ON(IS_ERR(acpuclk_init_data->src_clocks[i].clk));
		/*
		 * Prepare the PLLs because we enable/disable them
		 * in atomic context during power collapse/restore.
		 */
		BUG_ON(clk_prepare(acpuclk_init_data->src_clocks[i].clk));
	}

	/* Improve boot time by ramping up CPU immediately */
	for (i = 0; acpuclk_init_data->freq_tbl[i].khz != 0 &&
			acpuclk_init_data->freq_tbl[i].use_for_scaling; i++)
		max_cpu_khz = acpuclk_init_data->freq_tbl[i].khz;

	/*
	 * Initialize regulators.
	 * NOTE(review): after the loop above, i indexes the first entry
	 * that is either the khz==0 terminator or not usable for scaling,
	 * yet its vdd_cpu/vdd_mem values are used here — confirm this is
	 * the intended table layout and not an off-by-one.
	 */
	rc = increase_vdd(acpuclk_init_data->freq_tbl[i].vdd_cpu,
		acpuclk_init_data->freq_tbl[i].vdd_mem);
	if (rc)
		goto err_vdd;

	rc = regulator_enable(acpuclk_init_data->vdd_mem);
	if (rc) {
		dev_err(&pdev->dev, "regulator_enable for mem failed\n");
		goto err_vdd;
	}

	rc = regulator_enable(acpuclk_init_data->vdd_cpu);
	if (rc) {
		dev_err(&pdev->dev, "regulator_enable for cpu failed\n");
		goto err_vdd_cpu;
	}

	acpuclk_cortex_set_rate(0, max_cpu_khz, SETRATE_INIT);

	acpuclk_register(&acpuclk_cortex_data);
	cpufreq_table_init();

	return 0;

err_vdd_cpu:
	regulator_disable(acpuclk_init_data->vdd_mem);
err_vdd:
	/* regulator handles are presumably obtained by the caller —
	 * they are released here on failure; verify against callers */
	regulator_put(acpuclk_init_data->vdd_mem);
	regulator_put(acpuclk_init_data->vdd_cpu);

	for (i = 0; i < NUM_SRC; i++) {
		if (!acpuclk_init_data->src_clocks[i].name)
			continue;
		clk_unprepare(acpuclk_init_data->src_clocks[i].clk);
		clk_put(acpuclk_init_data->src_clocks[i].clk);
	}
	return rc;
}