示例#1
0
/*
 * ce4100_gpio_irq_setup() - wire up interrupt handling for one GPIO bank.
 *
 * Allocates a contiguous range of virqs, quiesces the bank's interrupt
 * sources, claims the (shared) PCI interrupt and installs a fasteoi flow
 * handler on every GPIO line.  Returns 0 on success or a negative errno.
 */
__devinit int ce4100_gpio_irq_setup(struct intelce_gpio_chip *c, struct pci_dev *pdev)
{
	unsigned int n;
	int ret;

	c->irq_base = irq_alloc_descs(-1, 0, CE4100_PUB_GPIOS_PER_BANK, -1);
	if (c->irq_base < 0)
		return c->irq_base;

	/* mask + ACK all interrupt sources */
	intelce_gpio_mmio_write32(0, c->reg_base + CE4100_PUB_GPIO_INT_EN);
	intelce_gpio_mmio_write32(0xFFF, c->reg_base + CE4100_PUB_GPIO_INT_STAT);

	ret = request_irq(pdev->irq, ce4100_gpio_irq_handler, IRQF_SHARED,
			  "ce4100_gpio", c);
	if (ret)
		goto out_free_desc;

	/*
	 * This gpio irq controller latches level irqs. Testing shows that if
	 * we unmask & ACK the IRQ before the source of the interrupt is gone
	 * then the interrupt is active again.
	 */
	for (n = 0; n < c->chip.ngpio; n++) {
		int virq = c->irq_base + n;

		irq_set_chip_and_handler_name(virq, &ce4100_irq_chip,
					      handle_fasteoi_irq, "gpio_irq");
		irq_set_chip_data(virq, c);
	}
	return 0;

out_free_desc:
	irq_free_descs(c->irq_base, CE4100_PUB_GPIOS_PER_BANK);
	return ret;
}
示例#2
0
/*
 * em_gio_irq_domain_init() - allocate the GIO interrupt descriptors and
 * register a legacy irq domain over them.
 *
 * Returns 0 on success, -ENXIO when either the descriptor allocation or
 * the domain registration fails.  On domain failure the descriptors are
 * released again.
 */
static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)
{
	struct platform_device *pdev = p->pdev;
	struct gpio_em_config *pdata = pdev->dev.platform_data;

	p->irq_base = irq_alloc_descs(pdata->irq_base, 0,
				      pdata->number_of_pins, numa_node_id());
	if (IS_ERR_VALUE(p->irq_base)) {
		dev_err(&pdev->dev, "cannot get irq_desc\n");
		return -ENXIO;
	}

	pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n",
		 pdata->gpio_base, pdata->number_of_pins, p->irq_base);

	p->irq_domain = irq_domain_add_legacy(pdev->dev.of_node,
					      pdata->number_of_pins,
					      p->irq_base, 0,
					      &em_gio_irq_domain_ops, p);
	if (p->irq_domain == NULL) {
		/* Roll back the descriptor allocation made above. */
		irq_free_descs(p->irq_base, pdata->number_of_pins);
		return -ENXIO;
	}

	return 0;
}
示例#3
0
/**
 * irq_sim_init - Initialize the interrupt simulator: allocate a range of
 *                dummy interrupts.
 *
 * @sim:        The interrupt simulator object to initialize.
 * @num_irqs:   Number of interrupts to allocate
 *
 * Returns 0 on success and a negative error number on failure.
 */
int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs)
{
	unsigned int i;

	sim->irqs = kmalloc_array(num_irqs, sizeof(*sim->irqs), GFP_KERNEL);
	if (!sim->irqs)
		return -ENOMEM;

	sim->irq_base = irq_alloc_descs(-1, 0, num_irqs, 0);
	if (sim->irq_base < 0) {
		/* Undo the array allocation and propagate the error. */
		kfree(sim->irqs);
		return sim->irq_base;
	}

	for (i = 0; i < num_irqs; i++) {
		int virq = sim->irq_base + i;

		sim->irqs[i].irqnum = virq;
		sim->irqs[i].enabled = false;
		irq_set_chip(virq, &irq_sim_irqchip);
		irq_set_chip_data(virq, &sim->irqs[i]);
		irq_set_handler(virq, &handle_simple_irq);
		/* Dummy lines: not requestable/probable, not auto-enabled. */
		irq_modify_status(virq,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	init_irq_work(&sim->work_ctx.work, irq_sim_handle_irq);
	sim->irq_count = num_irqs;

	return 0;
}
示例#4
0
文件: mach-mx31ads.c 项目: 7L/pi_plus
/*
 * mx31ads_init_expio() - boot-time setup of the CPLD ("EXPIO") interrupt
 * expander on the MX31ADS baseboard: quiesce its sources, allocate virqs,
 * register a legacy irq domain and chain the expander behind GPIO1_4.
 */
static void __init mx31ads_init_expio(void)
{
	int irq_base;
	int i, irq;

	printk(KERN_INFO "MX31ADS EXPIO(CPLD) hardware\n");

	/*
	 * Configure INT line as GPIO input
	 */
	mxc_iomux_alloc_pin(IOMUX_MODE(MX31_PIN_GPIO1_4, IOMUX_CONFIG_GPIO), "expio");

	/* disable the interrupt and clear the status */
	__raw_writew(0xFFFF, PBC_INTMASK_CLEAR_REG);
	__raw_writew(0xFFFF, PBC_INTSTATUS_REG);

	/*
	 * NOTE(review): a failed allocation only WARNs here; the code below
	 * would then build a domain on a negative base. Presumably this is
	 * treated as "cannot happen at boot" - confirm before relying on it.
	 */
	irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id());
	WARN_ON(irq_base < 0);

	domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0,
				       &irq_domain_simple_ops, NULL);
	WARN_ON(!domain);

	/* Level-type handler on every expander line. */
	for (i = irq_base; i < irq_base + MXC_MAX_EXP_IO_LINES; i++) {
		irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq);
		set_irq_flags(i, IRQF_VALID);
	}
	/* The CPLD's single output arrives on GPIO1_4; demux from there. */
	irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_4));
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
	irq_set_chained_handler(irq, mx31ads_expio_irq_handler);
}
示例#5
0
/*
 * of_max14577_dt() - fill @pdata from the max77836 device-tree node.
 *
 * Reads the interrupt GPIO, determines the irq base (allocated dynamically
 * under CONFIG_SPARSE_IRQ, otherwise taken from the "max77836,irq-base"
 * property) and the wakeup flag.  Returns 0 on success, -EINVAL when no DT
 * node is attached, -EFAULT on irq-base failures.
 *
 * Fix: the !CONFIG_SPARSE_IRQ branch used 'ret' without ever declaring it,
 * which broke the build in that configuration.
 */
static int of_max14577_dt(struct device *dev, struct max14577_platform_data *pdata)
{
	struct device_node *np = dev->of_node;
#ifndef CONFIG_SPARSE_IRQ
	int ret;
#endif

	if (!np)
		return -EINVAL;

	pdata->irq_gpio = of_get_named_gpio_flags(np, "max77836,irq-gpio",
				0, &pdata->irq_gpio_flags);
	pr_info("%s: irq-gpio: %u \n", __func__, pdata->irq_gpio);

#ifdef CONFIG_SPARSE_IRQ
	pdata->irq_base = irq_alloc_descs(-1, 0, MAX14577_IRQ_NUM, -1);
	if (pdata->irq_base < 0) {
		pr_err("%s: irq_alloc_descs Fail ret(%d)\n", __func__, pdata->irq_base);
		return -EFAULT;
	}
#else
	ret = of_property_read_u32(np, "max77836,irq-base", &pdata->irq_base);
	if (ret) {
		pr_err("%s: Failed to read irq-base\n", __func__);
		return -EFAULT;
	}
#endif
	pr_info("%s: irq_base:%d\n", __func__, pdata->irq_base);

	pdata->wakeup = of_property_read_bool(np, "max77836,wakeup");
	return 0;
}
示例#6
0
/*
 * irq_to_desc_alloc_node() - make sure a descriptor exists for @irq on
 * @node and return it.  An already-existing descriptor (-EEXIST) is as
 * good as a freshly allocated one; any other failure yields NULL.
 */
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	int ret = irq_alloc_descs(irq, irq, 1, node);

	if (ret != irq && ret != -EEXIST)
		return NULL;

	return irq_to_desc(irq);
}
/*
 * crystalcove_gpio_probe() - register the Crystal Cove PMIC gpio chip,
 * claim its fixed virq range and install the demux handlers.
 *
 * Fixes over the previous version:
 *  - the per-gpio trace passed a pointer to "%x" (undefined/truncating on
 *    64-bit) and was logged at error level; it now uses "%p" at debug level;
 *  - the unused local 'gpio_base' is gone;
 *  - the allocated irq descriptors are released when the threaded irq
 *    request fails instead of being leaked.
 */
static int crystalcove_gpio_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	struct crystalcove_gpio *cg = &gpio_info;
	int retval;
	int i;
	int irq_base;
	struct device *dev = intel_mid_pmic_dev();

	mutex_init(&cg->buslock);
	cg->irq_base = VV_PMIC_GPIO_IRQBASE;
	cg->chip.label = "intel_crystalcove";
	cg->chip.direction_input = crystalcove_gpio_direction_input;
	cg->chip.direction_output = crystalcove_gpio_direction_output;
	cg->chip.get = crystalcove_gpio_get;
	cg->chip.set = crystalcove_gpio_set;
	cg->chip.to_irq = crystalcove_gpio_to_irq;
	cg->chip.base = VV_PMIC_GPIO_BASE;
	cg->chip.ngpio = NUM_GPIO;
	cg->chip.can_sleep = 1;
	cg->chip.dev = dev;
	cg->chip.dbg_show = crystalcove_gpio_dbg_show;
	retval = gpiochip_add(&cg->chip);
	if (retval) {
		pr_warn("crystalcove: add gpio chip error: %d\n", retval);
		return retval;
	}

	/* The PMIC gpio virqs live at a fixed base; anything else is fatal. */
	irq_base = irq_alloc_descs(cg->irq_base, 0, NUM_GPIO, 0);
	if (cg->irq_base != irq_base)
		panic("gpio base irq fail, needs %d, return %d\n",
				cg->irq_base, irq_base);
	for (i = 0; i < NUM_GPIO; i++) {
		/* %p (not %x): cg is a pointer */
		pr_debug("gpio %p: set handler: %d\n", cg, i + cg->irq_base);
		irq_set_chip_data(i + cg->irq_base, cg);
		irq_set_chip_and_handler_name(i + cg->irq_base,
					      &crystalcove_irqchip,
					      handle_simple_irq,
					      "demux");
	}

	retval = request_threaded_irq(irq, NULL, crystalcove_gpio_irq_handler,
			IRQF_ONESHOT, "crystalcove_gpio", cg);
	if (retval) {
		pr_warn("Interrupt request failed\n");
		/* Don't leak the descriptors allocated above. */
		irq_free_descs(cg->irq_base, NUM_GPIO);
		return retval;
	}

	retval = sysfs_create_file(&dev->kobj, &platform_hwid_attr.attr);
	if (retval)
		pr_warn("%s, sysfs_create_file failed, %d\n", __func__, retval);

	return 0;
}
示例#8
0
文件: malta-int.c 项目: Endika/linux
/*
 * arch_init_irq() - Malta board interrupt bring-up: reserve the i8259 virq
 * range, probe the irqchips, initialise the system-controller interrupt
 * block for the detected core card, then install the COREHI action.
 */
void __init arch_init_irq(void)
{
	int corehi_irq;

	/*
	 * Preallocate the i8259's expected virq's here. Since irqchip_init()
	 * will probe the irqchips in hierarchial order, i8259 is probed last.
	 * If anything allocates a virq before the i8259 is probed, it will
	 * be given one of the i8259's expected range and consequently setup
	 * of the i8259 will fail.
	 */
	WARN(irq_alloc_descs(I8259A_IRQ_BASE, I8259A_IRQ_BASE,
			    16, numa_node_id()) < 0,
		"Cannot reserve i8259 virqs at IRQ%d\n", I8259A_IRQ_BASE);

	i8259_set_poll(mips_pcibios_iack);
	irqchip_init();

	/* Pick the MSC01 register base and irq map for this system controller. */
	switch (mips_revision_sconid) {
	case MIPS_REVISION_SCON_SOCIT:
	case MIPS_REVISION_SCON_ROCIT:
		if (cpu_has_veic)
			init_msc_irqs(MIPS_MSC01_IC_REG_BASE,
					MSC01E_INT_BASE, msc_eicirqmap,
					msc_nr_eicirqs);
		else
			init_msc_irqs(MIPS_MSC01_IC_REG_BASE,
					MSC01C_INT_BASE, msc_irqmap,
					msc_nr_irqs);
		break;

	case MIPS_REVISION_SCON_SOCITSC:
	case MIPS_REVISION_SCON_SOCITSCP:
		if (cpu_has_veic)
			init_msc_irqs(MIPS_SOCITSC_IC_REG_BASE,
					MSC01E_INT_BASE, msc_eicirqmap,
					msc_nr_eicirqs);
		else
			init_msc_irqs(MIPS_SOCITSC_IC_REG_BASE,
					MSC01C_INT_BASE, msc_irqmap,
					msc_nr_irqs);
	}
	/* Other sconid values: no MSC01 block to initialise. */

	/* Route COREHI depending on whether a GIC or vectored EIC is present. */
	if (gic_present) {
		corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
	} else if (cpu_has_veic) {
		set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch);
		corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
	} else {
		corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
	}

	setup_irq(corehi_irq, &corehi_irqaction);
}
/*
 * mmp_dt_irq_init() - DT boot-time setup of the MMP interrupt controller:
 * locate and map the ICU, allocate virqs above the legacy range, register
 * a legacy irq domain covering all lines and mask everything.
 *
 * Fix: the "irqbase should be started from 0" error path used to leak the
 * freshly allocated descriptors; they are now released before unmapping.
 */
void __init mmp_dt_irq_init(void)
{
	struct device_node *node;
	const struct of_device_id *of_id;
	struct mmp_intc_conf *conf;
	int nr_irqs, irq_base, ret, irq;

	node = of_find_matching_node(NULL, intc_ids);
	if (!node) {
		pr_err("Failed to find interrupt controller in arch-mmp\n");
		return;
	}
	of_id = of_match_node(intc_ids, node);
	conf = of_id->data;

	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
	if (ret) {
		pr_err("Not found mrvl,intc-nr-irqs property\n");
		return;
	}

	mmp_icu_base = of_iomap(node, 0);
	if (!mmp_icu_base) {
		pr_err("Failed to get interrupt controller register\n");
		return;
	}

	/* Legacy irqs 0..NR_IRQS_LEGACY-1 are preallocated; claim the rest. */
	irq_base = irq_alloc_descs(-1, 0, nr_irqs - NR_IRQS_LEGACY, 0);
	if (irq_base < 0) {
		pr_err("Failed to allocate IRQ numbers\n");
		goto err;
	} else if (irq_base != NR_IRQS_LEGACY) {
		pr_err("ICU's irqbase should be started from 0\n");
		goto err_free;
	}
	icu_data[0].conf_enable = conf->conf_enable;
	icu_data[0].conf_disable = conf->conf_disable;
	icu_data[0].conf_mask = conf->conf_mask;
	icu_data[0].nr_irqs = nr_irqs;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(node, nr_irqs, 0, 0,
						   &mmp_irq_domain_ops,
						   &icu_data[0]);
	irq_set_default_host(icu_data[0].domain);
	/* Start with every ICU line masked. */
	for (irq = 0; irq < nr_irqs; irq++)
		icu_mask_irq(irq_get_irq_data(irq));
	mmp2_mux_init(node);
	return;
err_free:
	irq_free_descs(irq_base, nr_irqs - NR_IRQS_LEGACY);
err:
	iounmap(mmp_icu_base);
}
示例#10
0
/*
 * systemasic_irq_init() - reserve the System ASIC event irq range at its
 * fixed base and install a level-triggered handler on each line.
 */
void systemasic_irq_init(void)
{
	int virq;

	if (IS_ERR_VALUE(irq_alloc_descs(HW_EVENT_IRQ_BASE, HW_EVENT_IRQ_BASE,
					 HW_EVENT_IRQ_MAX - HW_EVENT_IRQ_BASE,
					 -1))) {
		pr_err("%s: failed hooking irqs\n", __func__);
		return;
	}

	for (virq = HW_EVENT_IRQ_BASE; virq < HW_EVENT_IRQ_MAX; virq++)
		irq_set_chip_and_handler(virq, &systemasic_int,
					 handle_level_irq);
}
示例#11
0
/*
 * combiner_init() - boot-time setup of the Exynos interrupt combiner.
 *
 * Determines the number of combiner groups (DT property when a node is
 * given, SoC type otherwise), allocates virqs for every combined line,
 * registers a legacy irq domain and cascades each group behind its SPI.
 *
 * Fix: removed the local 'soc_max_nr', which was computed from the SoC
 * type but never read anywhere in the function (dead code).
 */
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq, irq_base;
	unsigned int nr_irq;

	if (np) {
		if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
			pr_warning("%s: number of combiners not specified, "
				"setting default as %d.\n",
				__func__, EXYNOS4_MAX_COMBINER_NR);
			max_nr = EXYNOS4_MAX_COMBINER_NR;
		}
	} else {
		max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
						EXYNOS4_MAX_COMBINER_NR;
	}
	nr_irq = max_nr * MAX_IRQ_IN_COMBINER;

	irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
	if (IS_ERR_VALUE(irq_base)) {
		/* Fall back to the static numbering and carry on. */
		irq_base = COMBINER_IRQ(0, 0);
		pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
	}

	combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
				&combiner_irq_domain_ops, &combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warning("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		/* Four combiner groups share each 0x10-sized register block. */
		combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
		irq = IRQ_SPI(i);
#ifdef CONFIG_OF
		if (np)
			irq = irq_of_parse_and_map(np, i);
#endif
		combiner_cascade_irq(i, irq);
	}

#ifdef CONFIG_PM
	/* Setup suspend/resume combiner saving */
	cpu_pm_register_notifier(&combiner_notifier_block);
#endif
}
示例#12
0
文件: gpio.c 项目: 01org/KVMGT-kernel
/*
 * at91_gpio_irqdomain() - build a legacy irqdomain for one PIO controller,
 * piled on top of the AIC virq range.  Any failure is fatal at boot.
 */
static void __init at91_gpio_irqdomain(struct at91_gpio_chip *at91_gpio)
{
	int base;

	base = irq_alloc_descs(-1, 0, at91_gpio->chip.ngpio, 0);
	if (base < 0)
		panic("at91_gpio.%d: error %d: couldn't allocate IRQ numbers.\n",
			at91_gpio->pioc_idx, base);

	at91_gpio->domain = irq_domain_add_legacy(NULL, at91_gpio->chip.ngpio,
						  base, 0,
						  &irq_domain_simple_ops,
						  NULL);
	if (at91_gpio->domain == NULL)
		panic("at91_gpio.%d: couldn't allocate irq domain.\n",
			at91_gpio->pioc_idx);
}
示例#13
0
/*
 * of_max77804k_dt() - fill @pdata from the max77804k device-tree node,
 * including the optional haptic sub-block configuration.
 *
 * Fix: the haptic data was kfree()d right after its pointer was stored in
 * pdata->haptic_data, leaving the caller with a use-after-free.  The
 * allocation now stays alive; ownership passes to @pdata's consumer.
 */
static int of_max77804k_dt(struct device *dev, struct max77804k_platform_data *pdata)
{
	struct device_node *np = dev->of_node;

#ifdef CONFIG_VIBETONZ
	struct max77804k_haptic_platform_data *haptic_data;
#endif
	if (!np)
		return -EINVAL;

#ifdef CONFIG_VIBETONZ
	haptic_data = kzalloc(sizeof(struct max77804k_haptic_platform_data), GFP_KERNEL);
	if (haptic_data == NULL)
		return -ENOMEM;
#endif

	pdata->irq_gpio = of_get_named_gpio_flags(np, "max77804k,irq-gpio",
				0, &pdata->irq_gpio_flags);
	pdata->wakeup = of_property_read_bool(np, "max77804k,wakeup");
#if 0
	pdata->irq_base = irq_alloc_descs(-1, 0, MAX77804K_IRQ_NR, -1);
	if (pdata->irq_base < 0) {
		pr_info("%s irq_alloc_descs is failed! irq_base:%d\n", __func__, pdata->irq_base);
		/* getting a predefined irq_base on dt file	*/
		of_property_read_u32(np, "max77804k,irq-base", &pdata->irq_base);
	}
	retval = of_get_named_gpio(np, "max77804k,wc-irq-gpio", 0);
	if (retval < 0)
		pdata->wc_irq_gpio = 0;
	else
		pdata->wc_irq_gpio = retval;
#endif
	pr_info("%s: irq-gpio: %u \n", __func__, pdata->irq_gpio);
#ifdef CONFIG_VIBETONZ
	of_property_read_u32(np, "haptic,max_timeout", &haptic_data->max_timeout);
	of_property_read_u32(np, "haptic,duty", &haptic_data->duty);
	of_property_read_u32(np, "haptic,period", &haptic_data->period);
	of_property_read_u32(np, "haptic,pwm_id", &haptic_data->pwm_id);
	pr_info("%s: timeout: %u \n", __func__, haptic_data->max_timeout);
	pr_info("%s: duty: %u \n", __func__, haptic_data->duty);
	pr_info("%s: period: %u \n", __func__, haptic_data->period);
	pr_info("%s: pwm_id: %u \n", __func__, haptic_data->pwm_id);
	/* Ownership transfers to pdata; do NOT free haptic_data here. */
	pdata->haptic_data = haptic_data;
#endif
	return 0;
}
示例#14
0
/*
 * shirq_init() - map the shared-irq register block described by @np,
 * allocate virqs for every block in @shirq_blocks, cover them with one
 * legacy irq domain and chain each block behind its parent interrupt.
 *
 * Returns 0 on success, -ENXIO on any failure (resources acquired so far
 * are released again).
 */
static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
		struct device_node *np)
{
	struct irq_domain *domain;
	void __iomem *regs;
	int i, parent, virq_base, hw = 0, total = 0;

	regs = of_iomap(np, 0);
	if (!regs) {
		pr_err("%s: failed to map shirq registers\n", __func__);
		return -ENXIO;
	}

	/* Total virqs needed across all blocks. */
	for (i = 0; i < block_nr; i++)
		total += shirq_blocks[i]->nr_irqs;

	virq_base = irq_alloc_descs(-1, 0, total, 0);
	if (IS_ERR_VALUE(virq_base)) {
		pr_err("%s: irq desc alloc failed\n", __func__);
		goto err_unmap;
	}

	domain = irq_domain_add_legacy(np, total, virq_base, 0,
			&irq_domain_simple_ops, NULL);
	if (WARN_ON(!domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		goto err_free_desc;
	}

	for (i = 0; i < block_nr; i++) {
		shirq_blocks[i]->base = regs;
		shirq_blocks[i]->virq_base = irq_find_mapping(domain, hw);

		parent = irq_of_parse_and_map(np, i);
		spear_shirq_register(shirq_blocks[i], parent);
		hw += shirq_blocks[i]->nr_irqs;
	}

	return 0;

err_free_desc:
	irq_free_descs(virq_base, total);
err_unmap:
	iounmap(regs);
	return -ENXIO;
}
示例#15
0
/*
 * __device_added() - bus notifier helper: when the first function of a new
 * mcuio device appears, create a soft local irq controller for that device
 * and assign the function its virq.
 *
 * Fix: the result of irq_alloc_descs() was never checked; a failed
 * allocation would have handed a negative base to the irq controller and
 * stored a bogus mdev->irq.
 */
static void __device_added(struct device *dev)
{
	struct mcuio_device *mdev = to_mcuio_dev(dev);
	struct mcuio_device *hc;
	struct mcuio_soft_hc *shc;
	struct mcuio_device *ic;
	struct mcuio_hc_platform_data *plat;
	int base_irq;

	/* Ignore the hc */
	if (!mdev->device)
		return;
	hc = to_mcuio_dev(dev->parent);
	plat = dev_get_platdata(&hc->dev);
	if (!plat) {
		WARN_ON(1);
		return;
	}
	shc = plat->data;
	if (!shc) {
		WARN_ON(1);
		return;
	}
	/* FIXME: ADD LOCKING */
	ic = shc->irq_controllers[mdev->device];
	if (ic)
		return;
	base_irq = irq_alloc_descs(-1, 0, MCUIO_FUNCS_PER_DEV, 0);
	if (base_irq < 0) {
		pr_err("mcuio soft hc: no irq descs for dev %d (%d)\n",
		       mdev->device, base_irq);
		return;
	}
	/* New device, add soft local irq controller */
	ic = mcuio_add_soft_local_irq_ctrl(hc, mdev->device, base_irq);
	if (!ic) {
		pr_err("mcuio soft hc: error adding irq ctrl for dev %d\n",
		       mdev->device);
		return;
	}
	shc->irq_controllers[mdev->device] = ic;
	/*
	  This is the first function of the new device. When the corresponding
	  mcuio_device was instantiated, the hc had no irqs, fix the field
	  up now
	*/
	mdev->irq = base_irq + mdev->fn;
}
示例#16
0
/*
 * pmic_irq_init() - mask all PMIC first-level interrupts, allocate the
 * PMIC virq range, register each line with genirq and request the
 * threaded PMIC interrupt (enabled as a wake source).
 *
 * Fix: a failed request_threaded_irq() used to leak the allocated irq
 * descriptors; they are now freed on that path.
 */
static int pmic_irq_init(void)
{
	int cur_irq;
	int ret;

	pmic->irq_mask = 0xff;
	intel_mid_pmic_writeb(MIRQLVL1, pmic->irq_mask);
	pmic->irq_mask = intel_mid_pmic_readb(MIRQLVL1);
	pmic->irq_base = irq_alloc_descs(VV_PMIC_IRQBASE, 0, PMIC_IRQ_NUM, 0);
	if (pmic->irq_base < 0) {
		dev_warn(pmic->dev, "Failed to allocate IRQs: %d\n",
			 pmic->irq_base);
		pmic->irq_base = 0;
		return -EINVAL;
	}

	/* Register them with genirq */
	for (cur_irq = pmic->irq_base;
	     cur_irq < PMIC_IRQ_NUM + pmic->irq_base;
	     cur_irq++) {
		irq_set_chip_data(cur_irq, pmic);
		irq_set_chip_and_handler(cur_irq, &pmic_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(cur_irq, 1);
		irq_set_noprobe(cur_irq);
	}

	ret = request_threaded_irq(pmic->irq, pmic_irq_isr, pmic_irq_thread,
			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			"intel_mid_pmic", pmic);
	if (ret != 0) {
		dev_err(pmic->dev, "Failed to request IRQ %d: %d\n",
				pmic->irq, ret);
		/* Release the descriptor range allocated above. */
		irq_free_descs(pmic->irq_base, PMIC_IRQ_NUM);
		pmic->irq_base = 0;
		return ret;
	}
	ret = enable_irq_wake(pmic->irq);
	if (ret != 0) {
		/* Non-fatal: the PMIC works, it just can't wake the system. */
		dev_warn(pmic->dev, "Can't enable PMIC IRQ as wake source: %d\n",
			 ret);
	}

	return 0;
}
示例#17
0
/*
 * Initialize the AIC interrupt controller.
 *
 * Maps the AIC registers, allocates NR_AIC_IRQS virqs (falling back to a
 * preallocated base 0 on failure), registers a legacy irq domain made the
 * default host, then programs every source's vector and priority.
 *
 * @priority: per-irq priority values written into each AIC_SMR register.
 */
void __init at91_aic_init(unsigned int priority[NR_AIC_IRQS])
{
	unsigned int i;
	int irq_base;

	at91_aic_base = ioremap(AT91_AIC, 512);
	if (!at91_aic_base)
		panic("Unable to ioremap AIC registers\n");

	/* Add irq domain for AIC */
	irq_base = irq_alloc_descs(-1, 0, NR_AIC_IRQS, 0);
	if (irq_base < 0) {
		WARN(1, "Cannot allocate irq_descs, assuming pre-allocated\n");
		irq_base = 0;
	}
	at91_aic_domain = irq_domain_add_legacy(at91_aic_np, NR_AIC_IRQS,
						irq_base, 0,
						&irq_domain_simple_ops, NULL);

	if (!at91_aic_domain)
		panic("Unable to add AIC irq domain\n");

	irq_set_default_host(at91_aic_domain);

	/*
	 * The IVR is used by macro get_irqnr_and_base to read and verify.
	 * The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
	 */
	for (i = 0; i < NR_AIC_IRQS; i++) {
		/* Put hardware irq number in Source Vector Register: */
		at91_aic_write(AT91_AIC_SVR(i), i);
		/* Active Low interrupt, with the specified priority */
		at91_aic_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);

		irq_set_chip_and_handler(i, &at91_aic_chip, handle_level_irq);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	/* Final controller-level setup (masks, spurious vector, etc.). */
	at91_aic_hw_init(NR_AIC_IRQS);
}
/*
 * of_max77849_dt() - fill @pdata from the max77849 device-tree node: the
 * interrupt gpio, a dynamically allocated irq base (with a DT fallback),
 * and - when CONFIG_MUIC_RESET_PIN_ENABLE is set - the MUIC reset gpio.
 * Returns 0 on success, -EINVAL when no DT node is attached.
 */
static int of_max77849_dt(struct device *dev, struct max77849_platform_data *pdata)
{
	struct device_node *np = dev->of_node;
	if(!np) {
		return -EINVAL;
	}

	/* Missing/invalid gpio is tolerated; 0 marks "no irq gpio". */
	pdata->irq_gpio = of_get_named_gpio(np, "max77849,irq-gpio", 0);
	if (pdata->irq_gpio < 0) {
		pr_err("%s: failed get max77849 irq-gpio : %d\n",
			__func__, pdata->irq_gpio);
		pdata->irq_gpio = 0;
	}
	pdata->irq_base = irq_alloc_descs(-1, 0, MAX77849_IRQ_NR, -1);
	if (pdata->irq_base < 0) {
		pr_info("%s irq_alloc_descs is failed! irq_base:%d\n", __func__, pdata->irq_base);
		/* getting a predefined irq_base on dt file	*/
		of_property_read_u32(np, "max77849,irq-base", &pdata->irq_base);
	}

#ifdef CONFIG_MUIC_RESET_PIN_ENABLE
	/* NOTE(review): the error message below prints irq_gpio, not
	 * irq_reset_gpio - looks like a copy/paste slip; confirm. */
	pdata->irq_reset_gpio = of_get_named_gpio(np, "max77849,irq-reset-gpio", 0);
	if (pdata->irq_reset_gpio < 0) {
		pr_err("%s: failed get max77849 irq-reset-gpio : %d\n",
			__func__, pdata->irq_gpio);
		pdata->irq_reset_gpio = -1;
		muic_reset_pin = 0;
	}
	else
		muic_reset_pin = 1;

#endif

	pr_info("%s: irq-gpio: %u \n", __func__, pdata->irq_gpio);
#ifdef CONFIG_MUIC_RESET_PIN_ENABLE
	pr_info("%s: irq-reset-gpio: %u \n", __func__, pdata->irq_reset_gpio);
#endif
	return 0;
}
示例#19
0
文件: hd64461.c 项目: 03199618/linux
/*
 * setup_hd64461() - bring up the HD64461 companion chip: mask its
 * interrupt sources, reserve 16 virqs at HD64461_IRQBASE and chain them
 * behind CONFIG_HD64461_IRQ.  Returns 0 on success or a negative errno.
 */
int __init setup_hd64461(void)
{
	int base, i;

	printk(KERN_INFO
	       "HD64461 configured at 0x%x on irq %d(mapped into %d to %d)\n",
	       HD64461_IOBASE, CONFIG_HD64461_IRQ, HD64461_IRQBASE,
	       HD64461_IRQBASE + 15);

/* Should be at processor specific part.. */
#if defined(CONFIG_CPU_SUBTYPE_SH7709)
	__raw_writew(0x2240, INTC_ICR1);
#endif
	/* Mask every HD64461 source before installing the demux. */
	__raw_writew(0xffff, HD64461_NIMR);

	base = irq_alloc_descs(HD64461_IRQBASE, HD64461_IRQBASE, 16, -1);
	if (IS_ERR_VALUE(base)) {
		pr_err("%s: failed hooking irqs for HD64461\n", __func__);
		return base;
	}

	for (i = 0; i < 16; i++)
		irq_set_chip_and_handler(base + i, &hd64461_irq_chip,
					 handle_level_irq);

	irq_set_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux);
	irq_set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW);

#ifdef CONFIG_HD64461_ENABLER
	printk(KERN_INFO "HD64461: enabling PCMCIA devices\n");
	__raw_writeb(0x4c, HD64461_PCC1CSCIER);
	__raw_writeb(0x00, HD64461_PCC1CSCR);
#endif

	return 0;
}
示例#20
0
/**
 * xaxipcie_alloc_msi_irqdescs - allocate msi irq descs
 * @node: Pointer to device node structure
 * @msg_addr: PCIe MSI message address
 *
 * @return: Allocated MSI IRQ Base/ error
 *
 * @note: This function is called when xaxipcie_init_port() is called
 *
 * Fix: a failed irq_domain_add_legacy() used to leak the freshly
 * allocated descriptors; they are now released before returning -ENOMEM.
 */
int xaxipcie_alloc_msi_irqdescs(struct device_node *node,
					unsigned long msg_addr)
{
	/* Store the PCIe MSI message address */
	xaxipcie_msg_addr = msg_addr;

	/* Allocate MSI IRQ descriptors */
	xaxipcie_msi_irq_base = irq_alloc_descs(-1, 0,
					XILINX_NUM_MSI_IRQS, 0);

	if (xaxipcie_msi_irq_base < 0)
		return -ENODEV;

	/* Register IRQ domain */
	xaxipcie_irq_domain = irq_domain_add_legacy(node,
				XILINX_NUM_MSI_IRQS,
				xaxipcie_msi_irq_base,
				0, &irq_domain_simple_ops, NULL);

	if (!xaxipcie_irq_domain) {
		/* Roll back the descriptor allocation made above. */
		irq_free_descs(xaxipcie_msi_irq_base, XILINX_NUM_MSI_IRQS);
		return -ENOMEM;
	}

	return xaxipcie_msi_irq_base;
}
/*
 * omap_init_irq_legacy() - map the INTC at @base, register a legacy irq
 * domain and create one generic irq chip per bank of 32 lines.
 * Returns 0 on success, -ENOMEM when the register window can't be mapped.
 */
static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
{
	int i, virq_base;

	omap_irq_base = ioremap(base, SZ_4K);
	if (WARN_ON(!omap_irq_base))
		return -ENOMEM;

	virq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
	if (virq_base < 0) {
		/* Assume a pre-allocated range starting at 0. */
		pr_warn("Couldn't allocate IRQ numbers\n");
		virq_base = 0;
	}

	domain = irq_domain_add_legacy(node, omap_nr_irqs, virq_base, 0,
			&irq_domain_simple_ops, NULL);

	omap_irq_soft_reset();

	for (i = 0; i < omap_nr_irqs; i += 32)
		omap_alloc_gc_legacy(omap_irq_base + i, i + virq_base, 32);

	return 0;
}
示例#22
0
/*
 * sunxi_gpio_probe() - probe entry point for the sunxi GPIO driver.
 *
 * Parses the script.bin "[gpio_para]" section (gpio_used / gpio_num /
 * gpio_pin_N), allocates the chip plus per-pin data in one chunk and the
 * name table in another, maps the PIO registers, sets up optional EINT
 * interrupt support and registers the gpio chip.
 *
 * Fixes over the previous version:
 *  - allocation results are checked BEFORE use (the old code filled in
 *    pnames[] and derived gpio_data before the NULL checks);
 *  - a failed ioremap() no longer reaches iounmap().
 */
static int __devinit sunxi_gpio_probe(struct platform_device *pdev)
{
	int i;
	int err = 0;
	int names_size = 0;
	int gpio_used = 0;
	int gpio_num = 0;
	struct sunxi_gpio_data *gpio_i = NULL;
	struct sunxi_gpio_data *gpio_data = NULL;
	struct sunxi_gpio_chip *sunxi_chip = NULL;
	char **pnames = NULL;

	/* parse script.bin for [gpio_para] section
	   gpio_used/gpio_num/gpio_pin_x */

	pr_info("sunxi_gpio driver init ver %s\n", SUNXI_GPIO_VER);
	err = script_parser_fetch("gpio_para", "gpio_used", &gpio_used,
					sizeof(gpio_used)/sizeof(int));
	if (err) {
		/* Not error - just info */
		pr_info("%s can't find script.bin '[gpio_para]' 'gpio_used'\n",
			__func__);
		return err;
	}

	if (!gpio_used) {
		pr_info("%s gpio_used is false. Skip gpio initialization\n",
			__func__);
		err = 0;
		return err;
	}

	err = script_parser_fetch("gpio_para", "gpio_num", &gpio_num,
					sizeof(gpio_num)/sizeof(int));
	if (err) {
		pr_err("%s script_parser_fetch '[gpio_para]' 'gpio_num' err\n",
			__func__);
		return err;
	}

	if (!gpio_num) {
		pr_info("%s gpio_num is none. Skip gpio initialization\n",
			__func__);
		err = 0;
		return err;
	}

	/* Allocate memory for sunxi_gpio_chip + data/names array */
	sunxi_chip = kzalloc(sizeof(struct sunxi_gpio_chip) +
				sizeof(struct sunxi_gpio_data) * gpio_num,
				GFP_KERNEL);
	if (!sunxi_chip) {
		pr_err("%s kzalloc failed\n", __func__);
		return -ENOMEM;
	}
	/* Per-pin data lives directly behind the chip structure. */
	gpio_data = (void *)sunxi_chip + sizeof(struct sunxi_gpio_chip);

	/* Allocate memory for variable array of fixed size strings */
	/* in one chunk. This is to avoid 1+gpio_num kzalloc calls */
	names_size = sizeof(*pnames) * gpio_num +
		     sizeof(char) * MAX_GPIO_NAMELEN * gpio_num;

	pnames = kzalloc(names_size, GFP_KERNEL);
	if (!pnames) {
		pr_err("%s kzalloc failed\n", __func__);
		err = -ENOMEM;
		goto exit;
	}
	for (i = 0; i < gpio_num; i++) {
		pnames[i] = (void *)pnames + sizeof(*pnames) * gpio_num +
				i * MAX_GPIO_NAMELEN;
	}

	/* Parse gpio_para/pin script data */
	gpio_i = gpio_data;
	for (i = 0; i < gpio_num; i++) {

		sprintf(gpio_i->pin_name, "gpio_pin_%d", i+1);
		err = script_parser_fetch("gpio_para", gpio_i->pin_name,
					(int *)&gpio_i->info,
					sizeof(script_gpio_set_t));

		if (err) {
			/*
			 * NOTE(review): a parse failure only stops the scan;
			 * the probe continues and may still return 0.
			 * Presumably best-effort - confirm.
			 */
			pr_err("%s script_parser_fetch '[gpio_para]' '%s' err\n",
				__func__, gpio_i->pin_name);
			break;
		}

		gpio_i++;
	}

	sunxi_chip->gaddr = ioremap(PIO_BASE_ADDRESS, PIO_RANGE_SIZE);
	if (!sunxi_chip->gaddr) {
		pr_err("Can't request gpio registers memory\n");
		err = -EIO;
		goto exit;	/* nothing mapped yet - skip iounmap() */
	}

	sunxi_chip->dev		= &pdev->dev;
	sunxi_chip->data	= gpio_data;
	sunxi_chip->chip	= template_chip;
	sunxi_chip->chip.ngpio	= gpio_num;
	sunxi_chip->chip.dev	= &pdev->dev;
	sunxi_chip->chip.label	= "A1X_GPIO";
	sunxi_chip->chip.base	= 1;
	sunxi_chip->chip.names	= (const char *const *)pnames;
	sunxi_chip->irq_base	= -1;

	/* configure EINTs for the detected SoC */
	sunxi_gpio_eint_probe();

	/* This needs additional system irq numbers (NR_IRQ=NR_IRQ+EINT_NUM) */
	if (EINT_NUM > 0) {
		sunxi_chip->irq_base = irq_alloc_descs(-1, 0, EINT_NUM, 0);
		if (sunxi_chip->irq_base < 0) {
			err = sunxi_chip->irq_base;
			pr_err("Couldn't allocate virq numbers err %d. GPIO irq support disabled\n", err);
		}
	} else
		pr_info("GPIO irq support disabled in this platform\n");

	spin_lock_init(&sunxi_chip->irq_lock);
	sunxi_gpio_irq_init(sunxi_chip);

	if (sunxi_chip->irq_base >= 0) {
		err = request_irq(GPIO_IRQ_NO, sunxi_gpio_irq_handler,
				  IRQF_SHARED, "sunxi-gpio", sunxi_chip);
		if (err) {
			pr_err("Can't request irq %d\n", GPIO_IRQ_NO);
			goto irqchip;
		}
	}

	err = gpiochip_add(&sunxi_chip->chip);
	if (err < 0)
		goto irqhdl;

	platform_set_drvdata(pdev, sunxi_chip);
	return 0;

irqhdl:
	if (sunxi_chip->irq_base >= 0)
		free_irq(GPIO_IRQ_NO, sunxi_chip);
irqchip:
	sunxi_gpio_irq_remove(sunxi_chip);
	if (sunxi_chip->irq_base >= 0)
		irq_free_descs(sunxi_chip->irq_base, EINT_NUM);
	iounmap(sunxi_chip->gaddr);
exit:
	kfree(sunxi_chip);
	kfree(pnames);

	return err;
}
示例#23
0
/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * map:       The regmap for the device.
 * irq:       The IRQ the device uses to signal interrupts
 * irq_flags: The IRQF_ flags to use for the primary interrupt.
 * chip:      Configuration for the interrupt controller.
 * data:      Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	if (chip->num_type_reg) {
		d->type_buf_def = kcalloc(chip->num_type_reg,
					sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (chip->num_type_reg) {
		for (i = 0; i < chip->num_irqs; i++) {
			reg = chip->irqs[i].type_reg_offset / map->reg_stride;
			d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
					chip->irqs[i].type_falling_mask;
		}
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;

			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (chip->type_invert)
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0xFF);
			else
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0x0);
			if (ret != 0) {
				dev_err(map->dev,
					"Failed to set type in 0x%x: %x\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * map:       The regmap for the device.
 * irq:       The IRQ the device uses to signal interrupts
 * irq_flags: The IRQF_ flags to use for the primary interrupt.
 * chip:      Configuration for the interrupt controller.
 * data:      Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int cur_irq, i;
	int ret = -ENOMEM;

	irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
	if (irq_base < 0) {
		dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
			 irq_base);
		return irq_base;
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	*data = d;

	*data = d;

	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs,
				    GFP_KERNEL);
	if (!d->status_reg_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;
	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		if (chip->mask_invert)
			ret = regmap_write(map, chip->mask_base + i,
					~d->mask_buf[i]);
		else
			ret = regmap_write(map, chip->mask_base + i,
					d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				chip->mask_base + i, ret);
			goto err_alloc;
		}
/* need to wait between 32K register for 88PM805 */
#ifdef CONFIG_MFD_88PM805
		if (!strcmp("88pm805", chip->name))
			msleep(1);
#endif
	}

	/* Register them with genirq */
	for (cur_irq = irq_base;
	     cur_irq < chip->num_irqs + irq_base;
	     cur_irq++) {
		irq_set_chip_data(cur_irq, d);
		irq_set_chip_and_handler(cur_irq, &regmap_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(cur_irq, 1);

		/* ARM needs us to explicitly flag the IRQ as valid
		 * and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		irq_set_noprobe(cur_irq);
#endif
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
		goto err_alloc;
	}

	return 0;

err_alloc:
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
	return ret;
}
示例#25
0
/**
 * xgpiops_probe - Initialization method for a xgpiops device
 * @pdev:	platform device instance
 *
 * This function allocates memory resources for the gpio device and registers
 * all the banks of the device. It will also set up interrupts for the gpio
 * pins.
 * Note: Interrupts are disabled for all the banks during initialization.
 * Returns 0 on success, negative error otherwise.
 */
static int xgpiops_probe(struct platform_device *pdev)
{
	int ret;
	unsigned int irq_num;
	struct xgpiops *gpio;
	struct gpio_chip *chip;
	resource_size_t remap_size;
	struct resource *mem_res = NULL;
	int pin_num, bank_num, gpio_irq;

	gpio = kzalloc(sizeof(struct xgpiops), GFP_KERNEL);
	if (!gpio) {
		dev_err(&pdev->dev,
			"couldn't allocate memory for gpio private data\n");
		return -ENOMEM;
	}

	spin_lock_init(&gpio->gpio_lock);

	platform_set_drvdata(pdev, gpio);

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		dev_err(&pdev->dev, "No memory resource\n");
		ret = -ENODEV;
		goto err_free_gpio;
	}

	remap_size = mem_res->end - mem_res->start + 1;
	if (!request_mem_region(mem_res->start, remap_size, pdev->name)) {
		dev_err(&pdev->dev, "Cannot request IO\n");
		ret = -ENXIO;
		goto err_free_gpio;
	}

	gpio->base_addr = ioremap(mem_res->start, remap_size);
	if (gpio->base_addr == NULL) {
		dev_err(&pdev->dev, "Couldn't ioremap memory at 0x%08lx\n",
			(unsigned long)mem_res->start);
		ret = -ENOMEM;
		goto err_release_region;
	}

	irq_num = platform_get_irq(pdev, 0);
	gpio->irq = irq_num;

	/* configure the gpio chip */
	chip = &gpio->chip;
	chip->label = "xgpiops";
	chip->owner = THIS_MODULE;
	chip->dev = &pdev->dev;
	chip->get = xgpiops_get_value;
	chip->set = xgpiops_set_value;
	chip->request = xgpiops_request;
	chip->free = xgpiops_free;
	chip->direction_input = xgpiops_dir_in;
	chip->direction_output = xgpiops_dir_out;
	chip->to_irq = xgpiops_to_irq;
	chip->dbg_show = NULL;
	chip->base = 0;		/* default pin base */
	chip->ngpio = XGPIOPS_NR_GPIOS;
	chip->can_sleep = 0;

	gpio->irq_base = irq_alloc_descs(-1, 0, chip->ngpio, 0);
	if (gpio->irq_base < 0) {
		dev_err(&pdev->dev, "Couldn't allocate IRQ numbers\n");
		ret = -ENODEV;
		goto err_iounmap;
	}

	irq_domain = irq_domain_add_legacy(pdev->dev.of_node,
					   chip->ngpio, gpio->irq_base, 0,
					   &irq_domain_simple_ops, NULL);

	/* report a bug if gpio chip registration fails */
	ret = gpiochip_add(chip);
	if (ret < 0) {
		dev_err(&pdev->dev, "gpio chip registration failed\n");
		goto err_iounmap;
	} else {
		dev_info(&pdev->dev, "gpio at 0x%08lx mapped to 0x%08lx\n",
			 (unsigned long)mem_res->start,
			 (unsigned long)gpio->base_addr);
	}

	/* Enable GPIO clock */
	gpio->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(gpio->clk)) {
		dev_err(&pdev->dev, "input clock not found.\n");
		ret = PTR_ERR(gpio->clk);
		goto err_chip_remove;
	}
	ret = clk_prepare_enable(gpio->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock.\n");
		goto err_clk_put;
	}

	/* disable interrupts for all banks */
	for (bank_num = 0; bank_num < 4; bank_num++) {
		xgpiops_writereg(0xffffffff, gpio->base_addr +
				  XGPIOPS_INTDIS_OFFSET(bank_num));
	}

	/*
	 * set the irq chip, handler and irq chip data for callbacks for
	 * each pin
	 */
	for (pin_num = 0; pin_num < min_t(int, XGPIOPS_NR_GPIOS,
						(int)chip->ngpio); pin_num++) {
		gpio_irq = irq_find_mapping(irq_domain, pin_num);
		irq_set_chip_and_handler(gpio_irq, &xgpiops_irqchip,
							handle_simple_irq);
		irq_set_chip_data(gpio_irq, (void *)gpio);
		set_irq_flags(gpio_irq, IRQF_VALID);
	}

	irq_set_handler_data(irq_num, (void *)gpio);
	irq_set_chained_handler(irq_num, xgpiops_irqhandler);

	xgpiops_pm_runtime_init(pdev);

	device_set_wakeup_capable(&pdev->dev, 1);

	return 0;

err_clk_put:
	clk_put(gpio->clk);
err_chip_remove:
	gpiochip_remove(chip);
err_iounmap:
	iounmap(gpio->base_addr);
err_release_region:
	release_mem_region(mem_res->start, remap_size);
err_free_gpio:
	platform_set_drvdata(pdev, NULL);
	kfree(gpio);

	return ret;
}
示例#26
0
static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct palmas *palmas;
	struct palmas_platform_data *mfd_platform_data;
	int ret = 0, i;
	unsigned int reg, addr;
	int slave;
	char *rname;

	mfd_platform_data = dev_get_platdata(&i2c->dev);
	if (!mfd_platform_data)
		return -EINVAL;

	palmas = kzalloc(sizeof(struct palmas), GFP_KERNEL);
	if (palmas == NULL)
		return -ENOMEM;

	i2c_set_clientdata(i2c, palmas);
	palmas->dev = &i2c->dev;
	palmas->id = id->driver_data;

	ret = irq_alloc_descs(-1, 0, PALMAS_NUM_IRQ, 0);
	if (ret < 0) {
		dev_err(&i2c->dev, "failed to allocate IRQ descs\n");
		goto err_free;
	}

	palmas->irq = i2c->irq;
	palmas->irq_base = ret;
	palmas->irq_end = ret + PALMAS_NUM_IRQ;

	for (i = 0; i < PALMAS_NUM_CLIENTS; i++) {
		if (i == 0)
			palmas->i2c_clients[i] = i2c;
		else {
			palmas->i2c_clients[i] =
					i2c_new_dummy(i2c->adapter,
							i2c->addr + i);
			if (!palmas->i2c_clients[i]) {
				dev_err(palmas->dev,
					"can't attach client %d\n", i);
				ret = -ENOMEM;
				goto err_clients;
			}
		}
		palmas->regmap[i] = regmap_init_i2c(palmas->i2c_clients[i],
				&palmas_regmap_config[i]);
		if (IS_ERR(palmas->regmap[i])) {
			ret = PTR_ERR(palmas->regmap[i]);
			/* clear so the unwind loop below can skip it */
			palmas->regmap[i] = NULL;
			dev_err(palmas->dev, "Failed to allocate register map "
					"No: %d, because: %d\n", i, ret);
			goto err_clients;
		}
	}

	ret = palmas_irq_init(palmas);
	if (ret < 0)
		goto err_clients;

	slave = PALMAS_BASE_TO_SLAVE(PALMAS_DESIGNREV_BASE);
	addr = PALMAS_BASE_TO_REG(PALMAS_DESIGNREV_BASE, 0);
	/*
	 * Revision either
	 * PALMAS_REV_ES1_0 or
	 * PALMAS_REV_ES2_0 or
	 * PALMAS_REV_ES2_1
	 */
	ret = regmap_read(palmas->regmap[slave], addr, &reg);
	if (ret)
		goto err;

	palmas->revision = reg;
	switch (palmas->revision) {
	case PALMAS_REV_ES1_0:
		rname = "ES 1.0";
		break;
	case PALMAS_REV_ES2_0:
		rname = "ES 2.0";
		break;
	case PALMAS_REV_ES2_1:
		rname = "ES 2.1";
		break;
	default:
		rname = "unknown";
		break;
	}
	dev_info(palmas->dev, "%s %s detected\n", id->name, rname);

	slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
	addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
			PALMAS_PRIMARY_SECONDARY_PAD1);

	if (mfd_platform_data->mux_from_pdata) {
		reg = mfd_platform_data->pad1;
		ret = regmap_write(palmas->regmap[slave], addr, reg);
		if (ret)
			goto err;
	} else {
		ret = regmap_read(palmas->regmap[slave], addr, &reg);
		if (ret)
			goto err;
	}

	if (!(reg & PRIMARY_SECONDARY_PAD1_GPIO_0))
		palmas->gpio_muxed |= PALMAS_GPIO_0_MUXED;
	if (!(reg & PRIMARY_SECONDARY_PAD1_GPIO_1_MASK))
		palmas->gpio_muxed |= PALMAS_GPIO_1_MUXED;
	else if ((reg & PRIMARY_SECONDARY_PAD1_GPIO_1_MASK) ==
			(2 << PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT))
		palmas->led_muxed |= PALMAS_LED1_MUXED;
	else if ((reg & PRIMARY_SECONDARY_PAD1_GPIO_1_MASK) ==
			(3 << PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT))
		palmas->pwm_muxed |= PALMAS_PWM1_MUXED;
	if (!(reg & PRIMARY_SECONDARY_PAD1_GPIO_2_MASK))
		palmas->gpio_muxed |= PALMAS_GPIO_2_MUXED;
	else if ((reg & PRIMARY_SECONDARY_PAD1_GPIO_2_MASK) ==
			(2 << PRIMARY_SECONDARY_PAD1_GPIO_2_SHIFT))
		palmas->led_muxed |= PALMAS_LED2_MUXED;
	else if ((reg & PRIMARY_SECONDARY_PAD1_GPIO_2_MASK) ==
			(3 << PRIMARY_SECONDARY_PAD1_GPIO_2_SHIFT))
		palmas->pwm_muxed |= PALMAS_PWM2_MUXED;
	if (!(reg & PRIMARY_SECONDARY_PAD1_GPIO_3))
		palmas->gpio_muxed |= PALMAS_GPIO_3_MUXED;

	addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
			PALMAS_PRIMARY_SECONDARY_PAD2);

	if (mfd_platform_data->mux_from_pdata) {
		reg = mfd_platform_data->pad2;
		ret = regmap_write(palmas->regmap[slave], addr, reg);
		if (ret)
			goto err;
	} else {
		ret = regmap_read(palmas->regmap[slave], addr, &reg);
		if (ret)
			goto err;
	}

	if (!(reg & PRIMARY_SECONDARY_PAD2_GPIO_4))
		palmas->gpio_muxed |= PALMAS_GPIO_4_MUXED;
	if (!(reg & PRIMARY_SECONDARY_PAD2_GPIO_5_MASK))
		palmas->gpio_muxed |= PALMAS_GPIO_5_MUXED;
	if (!(reg & PRIMARY_SECONDARY_PAD2_GPIO_6))
		palmas->gpio_muxed |= PALMAS_GPIO_6_MUXED;
	if (!(reg & PRIMARY_SECONDARY_PAD2_GPIO_7_MASK))
		palmas->gpio_muxed |= PALMAS_GPIO_7_MUXED;

	dev_info(palmas->dev, "Muxing GPIO %x, PWM %x, LED %x\n",
			palmas->gpio_muxed, palmas->pwm_muxed,
			palmas->led_muxed);

	reg = mfd_platform_data->power_ctrl;

	slave = PALMAS_BASE_TO_SLAVE(PALMAS_PMU_CONTROL_BASE);
	addr = PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE, PALMAS_POWER_CTRL);

	ret = regmap_write(palmas->regmap[slave], addr, reg);
	if (ret)
		goto err;

	palmas_rtc_init(palmas);

	ret = mfd_add_devices(palmas->dev, -1,
			      palmas_children, ARRAY_SIZE(palmas_children),
			      NULL, palmas->irq_base);
	if (ret < 0)
		goto err;

	return ret;

err:
	/*
	 * NOTE(review): the interrupt set up by palmas_irq_init() is not
	 * released here (no exit helper is visible in this file), so the
	 * regmaps/clients/descs are intentionally left allocated on this
	 * path to avoid a use-after-free from a late interrupt — confirm
	 * against the irq teardown API before tightening this further.
	 */
	mfd_remove_devices(palmas->dev);
	kfree(palmas);
	return ret;

err_clients:
	/* unwind dummy clients and regmaps created so far */
	for (i = 0; i < PALMAS_NUM_CLIENTS; i++) {
		if (palmas->regmap[i])
			regmap_exit(palmas->regmap[i]);
		if (i > 0 && palmas->i2c_clients[i])
			i2c_unregister_device(palmas->i2c_clients[i]);
	}
	irq_free_descs(palmas->irq_base, PALMAS_NUM_IRQ);
err_free:
	kfree(palmas);
	return ret;
}
示例#27
0
static int max77833_i2c_probe(struct i2c_client *i2c,
				const struct i2c_device_id *dev_id)
{
	struct max77833_dev *max77833;
	struct max77833_platform_data *pdata = i2c->dev.platform_data;

	u8 reg_data;
	u16 reg16_data;
	int ret = 0;

	pr_info("%s:%s\n", MFD_DEV_NAME, __func__);

	max77833 = kzalloc(sizeof(struct max77833_dev), GFP_KERNEL);
	if (!max77833) {
		dev_err(&i2c->dev, "%s: Failed to alloc mem for max77833\n", __func__);
		return -ENOMEM;
	}

	if (i2c->dev.of_node) {
		pdata = devm_kzalloc(&i2c->dev, sizeof(struct max77833_platform_data),
				GFP_KERNEL);
		if (!pdata) {
			dev_err(&i2c->dev, "Failed to allocate memory \n");
			ret = -ENOMEM;
			goto err;
		}

		ret = of_max77833_dt(&i2c->dev, pdata);
		if (ret < 0){
			dev_err(&i2c->dev, "Failed to get device of_node \n");
			goto err;
		}

		i2c->dev.platform_data = pdata;
	} else
		pdata = i2c->dev.platform_data;

	max77833->dev = &i2c->dev;
	max77833->i2c = i2c;
	max77833->irq = i2c->irq;

	/* guard clause: without platform data there is nothing to set up */
	if (!pdata) {
		ret = -EINVAL;
		goto err;
	}
	max77833->pdata = pdata;

	pdata->irq_base = irq_alloc_descs(-1, 0, MAX77833_IRQ_NR, -1);
	if (pdata->irq_base < 0) {
		pr_err("%s:%s irq_alloc_descs Fail! ret(%d)\n",
				MFD_DEV_NAME, __func__, pdata->irq_base);
		ret = -EINVAL;
		goto err;
	}
	max77833->irq_base = pdata->irq_base;

	max77833->irq_gpio = pdata->irq_gpio;
	max77833->wakeup = pdata->wakeup;

	mutex_init(&max77833->i2c_lock);

	i2c_set_clientdata(i2c, max77833);

	if (max77833_read_reg(i2c, MAX77833_PMIC_REG_PMICREV, &reg_data) < 0) {
		dev_err(max77833->dev,
			"device not found on this channel (this is not an error)\n");
		ret = -ENODEV;
		goto err_w_lock;
	} else {
		/* print rev */
		max77833->pmic_rev = (reg_data & 0x7);
		max77833->pmic_ver = ((reg_data & 0xF8) >> 0x3);
		pr_info("%s:%s device found: rev.0x%x, ver.0x%x\n",
				MFD_DEV_NAME, __func__,
				max77833->pmic_rev, max77833->pmic_ver);
	}

	/* No active discharge on safeout ldo 1,2 */
	max77833_update_reg(i2c, MAX77833_PMIC_REG_SAFEOUT_CTRL, 0x00, 0x30);
	max77833_update_reg(i2c, MAX77833_PMIC_REG_SAFEOUT_CTRL, 0x0, 0x40);
	max77833_read_reg(i2c, MAX77833_PMIC_REG_SAFEOUT_CTRL, &reg_data);
	pr_info("%s:%s reg[0x%02x]: 0x%02x\n", MFD_DEV_NAME, __func__,
			MAX77833_PMIC_REG_SAFEOUT_CTRL, reg_data);

	/* i2c_new_dummy() returns NULL on failure; check before use */
	max77833->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
	if (!max77833->muic) {
		dev_err(max77833->dev, "%s: failed to add MUIC i2c device\n",
			__func__);
		ret = -ENODEV;
		goto err_w_lock;
	}
	i2c_set_clientdata(max77833->muic, max77833);

	max77833->fuelgauge = i2c_new_dummy(i2c->adapter, I2C_ADDR_FG);
	if (!max77833->fuelgauge) {
		dev_err(max77833->dev, "%s: failed to add fuelgauge i2c device\n",
			__func__);
		ret = -ENODEV;
		goto err_muic;
	}
	i2c_set_clientdata(max77833->fuelgauge, max77833);

	/* checking pass5 in OTP */
	max77833_write_fg(max77833->fuelgauge, 0x00D6, 0x00E5);
	max77833_write_fg(max77833->fuelgauge, 0x00D8, 0x00D2);
	max77833_read_fg(max77833->fuelgauge, 0x04DC, &reg16_data);
	max77833->pmic_rev_pass5 = ((reg16_data & 0xFF) >= 0x52) ? true : false;
	pr_info("%s:%s [0x04DC : 0x%04x]\n", __func__,
		(max77833->pmic_rev_pass5) ? "over PASS5" : "under PASS5", reg16_data);

	ret = max77833_irq_init(max77833);

	if (ret < 0)
		goto err_irq_init;

	ret = mfd_add_devices(max77833->dev, -1, max77833_devs,
			ARRAY_SIZE(max77833_devs), NULL, 0, NULL);
	if (ret < 0)
		goto err_mfd;

	device_init_wakeup(max77833->dev, pdata->wakeup);

	return ret;

err_mfd:
	mfd_remove_devices(max77833->dev);
err_irq_init:
	/* both dummy clients exist here; the original leaked the fuelgauge */
	i2c_unregister_device(max77833->fuelgauge);
err_muic:
	i2c_unregister_device(max77833->muic);
err_w_lock:
	mutex_destroy(&max77833->i2c_lock);
	/* descriptors were allocated before any path reaching err_w_lock */
	irq_free_descs(max77833->irq_base, MAX77833_IRQ_NR);
err:
	kfree(max77833);
	return ret;
}
示例#28
0
static int sch_gpio_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sch_gpio *chip;
	int err, id;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;

	chip_ptr = chip;

	sch_gpio_core_save_state(&(chip->initial_core));
	sch_gpio_resume_save_state(&(chip->initial_resume));

	id = pdev->id;
	if (!id) {
		/* original returned here and leaked 'chip' */
		err = -ENODEV;
		goto err_free_chip;
	}

	/* Get UIO memory */
	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_free_chip;
	}

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res) {
		err = -EBUSY;
		goto err_free_info;
	}

	if (!request_region(res->start, resource_size(res), pdev->name)) {
		err = -EBUSY;
		goto err_free_info;
	}

	gpio_ba = res->start;

	irq_num = RESOURCE_IRQ;

	switch (id) {
	case PCI_DEVICE_ID_INTEL_SCH_LPC:
		sch_gpio_core.base = 0;
		sch_gpio_core.ngpio = 10;

		sch_gpio_resume.base = 10;
		sch_gpio_resume.ngpio = 4;

		/*
		 * GPIO[6:0] enabled by default
		 * GPIO7 is configured by the CMC as SLPIOVR
		 * Enable GPIO[9:8] core powered gpios explicitly
		 */
		outb(0x3, gpio_ba + CGEN + 1);
		/*
		 * SUS_GPIO[2:0] enabled by default
		 * Enable SUS_GPIO3 resume powered gpio explicitly
		 */
		outb(0x8, gpio_ba + RGEN);
		break;

	case PCI_DEVICE_ID_INTEL_ITC_LPC:
		sch_gpio_core.base = 0;
		sch_gpio_core.ngpio = 5;

		sch_gpio_resume.base = 5;
		sch_gpio_resume.ngpio = 9;
		break;

	case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
		sch_gpio_core.base = 0;
		sch_gpio_core.ngpio = 21;

		sch_gpio_resume.base = 21;
		sch_gpio_resume.ngpio = 9;
		break;

	case PCI_DEVICE_ID_INTEL_QUARK_ILB:
		sch_gpio_core.base = 0;
		sch_gpio_core.ngpio = 2;

		sch_gpio_resume.base = 2;
		sch_gpio_resume.ngpio = 6;
		break;

	default:
		err = -ENODEV;
		goto err_sch_gpio_core;
	}

	sch_gpio_core.dev = &pdev->dev;
	sch_gpio_resume.dev = &pdev->dev;

	err = gpiochip_add(&sch_gpio_core);
	if (err < 0)
		goto err_sch_gpio_core;

	err = gpiochip_add(&sch_gpio_resume);
	if (err < 0)
		goto err_sch_gpio_resume;

	chip->irq_base_core = irq_alloc_descs(-1, 0,
						sch_gpio_core.ngpio,
						NUMA_NO_NODE);
	if (chip->irq_base_core < 0) {
		dev_err(&pdev->dev, "failure adding GPIO core IRQ descs\n");
		chip->irq_base_core = -1;
		err = -ENODEV;
		goto err_sch_intr_core;
	}

	chip->irq_base_resume = irq_alloc_descs(-1, 0,
						sch_gpio_resume.ngpio,
						NUMA_NO_NODE);
	if (chip->irq_base_resume < 0) {
		dev_err(&pdev->dev, "failure adding GPIO resume IRQ descs\n");
		chip->irq_base_resume = -1;
		err = -ENODEV;
		goto err_sch_intr_resume;
	}

	platform_set_drvdata(pdev, chip);

	err = platform_device_register(&qrk_gpio_restrict_pdev);
	if (err < 0)
		goto err_sch_gpio_device_register;

	/* disable interrupts */
	sch_gpio_core_irq_disable_all(chip, sch_gpio_core.ngpio);
	sch_gpio_resume_irq_disable_all(chip, sch_gpio_resume.ngpio);

	err = request_irq(irq_num, sch_gpio_irq_handler,
				IRQF_SHARED, KBUILD_MODNAME, chip);
	if (err != 0) {
		dev_err(&pdev->dev,
			"%s request_irq failed\n", __func__);
		goto err_sch_request_irq;
	}

	sch_gpio_core_irqs_init(chip, sch_gpio_core.ngpio);
	sch_gpio_resume_irqs_init(chip, sch_gpio_resume.ngpio);

	/* UIO */
	info->port[0].name = "gpio_regs";
	info->port[0].start = res->start;
	info->port[0].size = resource_size(res);
	info->port[0].porttype = UIO_PORT_X86;
	info->name = "sch_gpio";
	info->version = "0.0.1";

	/* original discarded this error and returned 0 on failure */
	err = uio_register_device(&pdev->dev, info);
	if (err)
		goto err_sch_uio_register;

	pr_info("%s UIO port addr 0x%04x size %lu porttype %d\n",
		__func__, (unsigned int)info->port[0].start,
		info->port[0].size, info->port[0].porttype);

	return 0;

err_sch_uio_register:
	free_irq(irq_num, chip);

err_sch_request_irq:
	platform_device_unregister(&qrk_gpio_restrict_pdev);

err_sch_gpio_device_register:
	irq_free_descs(chip->irq_base_resume, sch_gpio_resume.ngpio);

err_sch_intr_resume:
	irq_free_descs(chip->irq_base_core, sch_gpio_core.ngpio);

err_sch_intr_core:
	gpiochip_remove(&sch_gpio_resume);

err_sch_gpio_resume:
	gpiochip_remove(&sch_gpio_core);

err_sch_gpio_core:
	release_region(res->start, resource_size(res));
	gpio_ba = 0;

err_free_info:
	kfree(info);
	info = NULL;

err_free_chip:
	sch_gpio_resume_restore_state(&(chip->initial_resume));
	sch_gpio_core_restore_state(&(chip->initial_core));
	kfree(chip);
	chip_ptr = NULL;

	return err;
}
static int max77888_i2c_probe(struct i2c_client *i2c,
				const struct i2c_device_id *dev_id)
{
	struct max77888_dev *max77888;
	struct max77888_platform_data *pdata = i2c->dev.platform_data;

	u8 reg_data;
	int ret = 0;

	pr_info("%s:%s\n", MFD_DEV_NAME, __func__);

	max77888 = kzalloc(sizeof(struct max77888_dev), GFP_KERNEL);
	if (!max77888) {
		dev_err(&i2c->dev, "%s: Failed to alloc mem for max77888\n", __func__);
		return -ENOMEM;
	}

	if (i2c->dev.of_node) {
		pdata = devm_kzalloc(&i2c->dev, sizeof(struct max77888_platform_data),
				GFP_KERNEL);
		if (!pdata) {
			dev_err(&i2c->dev, "Failed to allocate memory \n");
			ret = -ENOMEM;
			goto err;
		}

		ret = of_max77888_dt(&i2c->dev, pdata);
		if (ret < 0){
			dev_err(&i2c->dev, "Failed to get device of_node \n");
			goto err;
		}

		i2c->dev.platform_data = pdata;
	} else
		pdata = i2c->dev.platform_data;

	max77888->dev = &i2c->dev;
	max77888->i2c = i2c;
	max77888->irq = i2c->irq;
	if (pdata) {
		max77888->pdata = pdata;

		pdata->irq_base = irq_alloc_descs(-1, 0, MAX77888_IRQ_NR, -1);
		if (pdata->irq_base < 0) {
			pr_err("%s:%s irq_alloc_descs Fail! ret(%d)\n",
					MFD_DEV_NAME, __func__, pdata->irq_base);
			ret = -EINVAL;
			goto err;
		} else
			max77888->irq_base = pdata->irq_base;

		max77888->irq_gpio = pdata->irq_gpio;
		max77888->irqf_trigger = pdata->irqf_trigger;
		max77888->wakeup = pdata->wakeup;
	} else {
		ret = -EINVAL;
		goto err;
	}
	mutex_init(&max77888->i2c_lock);

	i2c_set_clientdata(i2c, max77888);

	if (max77888_read_reg(i2c, MAX77888_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
		dev_err(max77888->dev,
			"device not found on this channel (this is not an error)\n");
		ret = -ENODEV;
		goto err_w_lock;
	} else {
		/* print rev */
		max77888->pmic_rev = (reg_data & 0x7);
		max77888->pmic_ver = ((reg_data & 0xF8) >> 0x3);
		pr_info("%s:%s device found: rev.0x%x, ver.0x%x\n",
				MFD_DEV_NAME, __func__,
				max77888->pmic_rev, max77888->pmic_ver);
	}

	/* No active discharge on safeout ldo 1,2 */
	max77888_update_reg(i2c, MAX77888_CHG_REG_SAFEOUT_CTRL, 0x00, 0x30);

	max77888->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
	i2c_set_clientdata(max77888->muic, max77888);

	max77888->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
	i2c_set_clientdata(max77888->haptic, max77888);

	ret = max77888_irq_init(max77888);

	if (ret < 0)
		goto err_irq_init;

	ret = mfd_add_devices(max77888->dev, -1, max77888_devs,
			ARRAY_SIZE(max77888_devs), NULL, 0, NULL);
	if (ret < 0)
		goto err_mfd;

	device_init_wakeup(max77888->dev, pdata->wakeup);

	return ret;

err_mfd:
	mfd_remove_devices(max77888->dev);
err_irq_init:
	i2c_unregister_device(max77888->muic);
	i2c_unregister_device(max77888->haptic);
err_w_lock:
	mutex_destroy(&max77888->i2c_lock);
err:
	kfree(max77888);
	return ret;
}
示例#30
0
/******************************************************************************
 * probe & remove
 */
int mcp2210_irq_probe(struct mcp2210_device *dev)
{
	uint pin_ix;
	int base;

	mcp2210_info();
	mutex_init(&dev->irq_lock);

	dev->nr_irqs = 0;
	dev->poll_intr = 0;
	dev->poll_gpio = 0;

	/* Scan the pin configuration and record which pins need an IRQ. */
	for (pin_ix = 0; pin_ix < MCP2210_NUM_PINS; ++pin_ix) {
		const struct mcp2210_pin_config *cfg = &dev->config->pins[pin_ix];

		if (cfg->mode == MCP2210_PIN_SPI)
			continue;
		if (!cfg->has_irq)
			continue;

		++dev->nr_irqs;
		BUG_ON(dev->irq_revmap[pin_ix]);
		dev->irq_revmap[pin_ix] = cfg->irq;

		switch (cfg->mode) {
		case MCP2210_PIN_DEDICATED:
			dev->poll_intr = 1;
			break;
		case MCP2210_PIN_GPIO:
			dev->poll_gpio = 1;
			dev->irq_type[pin_ix] = cfg->irq_type;
			break;
		default:
			break;
		}
	}

	/* Nothing to do if no pin requested interrupt support. */
	if (!dev->nr_irqs)
		return 0;

	base = irq_alloc_descs(-1, 0, dev->nr_irqs, 0);
	if (base < 0) {
		/* CONFIG_SPARSE_IRQ needed? */
		mcp2210_err("Failed to allocate %u irq descriptors: %d", dev->nr_irqs, base);
		return base;
	}
	dev->irq_base = base;

	/* Wire each virtual IRQ to our chip and stash its descriptor. */
	for (pin_ix = 0; pin_ix < dev->nr_irqs; ++pin_ix) {
		int virq = base + pin_ix;

		dev->irq_descs[pin_ix] = irq_to_desc(virq);
		BUG_ON(!dev->irq_descs[pin_ix]);
		irq_set_chip_data(virq, dev);
		irq_set_chip(virq, &mcp2210_irq_chip);

#if defined(CONFIG_ARM) && LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)
		set_irq_flags(virq, 0);
#else
		irq_set_noprobe(virq);
#endif
	}

#ifdef CONFIG_MCP2210_GPIO
	if (dev->poll_gpio) {
		ctl_cmd_init(dev, &dev->cmd_poll_gpio,
			     MCP2210_CMD_GET_PIN_VALUE, 0, NULL, 0, false);
		dev->cmd_poll_gpio.head.complete = complete_poll;
		mcp2210_add_cmd(&dev->cmd_poll_gpio.head, false);
	}
#endif /* CONFIG_MCP2210_GPIO */

	if (dev->poll_intr) {
		/* read and then reset */
		ctl_cmd_init(dev, &dev->cmd_poll_intr,
			     MCP2210_CMD_GET_INTERRUPTS, 0, NULL, 0, false);
		dev->cmd_poll_intr.head.complete = complete_poll;
		mcp2210_add_cmd(&dev->cmd_poll_intr.head, false);
	}

	dev->is_irq_probed = 1;
	dev->suppress_poll_warn = 0;

	return 0;
}