Example #1
static void __init mx31ads_init_expio(void)
{
	int irq_base;
	int i, irq;

	printk(KERN_INFO "MX31ADS EXPIO(CPLD) hardware\n");

	/*
	 * Configure INT line as GPIO input
	 */
	mxc_iomux_alloc_pin(IOMUX_MODE(MX31_PIN_GPIO1_4, IOMUX_CONFIG_GPIO), "expio");

	/* disable the interrupt and clear the status */
	__raw_writew(0xFFFF, PBC_INTMASK_CLEAR_REG);
	__raw_writew(0xFFFF, PBC_INTSTATUS_REG);

	irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id());
	WARN_ON(irq_base < 0);

	domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0,
				       &irq_domain_simple_ops, NULL);
	WARN_ON(!domain);

	for (i = irq_base; i < irq_base + MXC_MAX_EXP_IO_LINES; i++) {
		irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq);
		set_irq_flags(i, IRQF_VALID);
	}
	irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_4));
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
	irq_set_chained_handler(irq, mx31ads_expio_irq_handler);
}
Example #2
static int __init ath79_misc_intc_of_init(
	struct device_node *node, struct device_node *parent)
{
	void __iomem *base = ath79_reset_base;
	struct irq_domain *domain;
	int irq;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq)
		panic("Failed to get MISC IRQ");

	domain = irq_domain_add_legacy(node, ATH79_MISC_IRQ_COUNT,
			ATH79_MISC_IRQ_BASE, 0, &misc_irq_domain_ops, NULL);
	if (!domain)
		panic("Failed to add MISC irqdomain");

	/* Disable and clear all interrupts */
	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);

	irq_set_chained_handler(irq, ath79_misc_irq_handler);

	return 0;
}
Example #3
static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)
{
	struct platform_device *pdev = p->pdev;
	struct gpio_em_config *pdata = pdev->dev.platform_data;

	p->irq_base = irq_alloc_descs(pdata->irq_base, 0,
				      pdata->number_of_pins, numa_node_id());
	if (IS_ERR_VALUE(p->irq_base)) {
		dev_err(&pdev->dev, "cannot get irq_desc\n");
		return -ENXIO;
	}
	pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n",
		 pdata->gpio_base, pdata->number_of_pins, p->irq_base);

	p->irq_domain = irq_domain_add_legacy(pdev->dev.of_node,
					      pdata->number_of_pins,
					      p->irq_base, 0,
					      &em_gio_irq_domain_ops, p);
	if (!p->irq_domain) {
		irq_free_descs(p->irq_base, pdata->number_of_pins);
		return -ENXIO;
	}

	return 0;
}
Example #4
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
				&xtensa_irq_domain_ops, &xtensa_irq_chip);
	irq_set_default_host(root_domain);
	return 0;
}
Example #5
static int __init imx53_gpio_add_irq_domain(struct device_node *np,
				struct device_node *interrupt_parent)
{
	static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;

	gpio_irq_base -= 32;
	irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);

	return 0;
}
Example #6
void __init sa1100_init_irq(void)
{
	request_resource(&iomem_resource, &irq_resource);

	/* disable all IRQs */
	ICMR = 0;

	/* all IRQs are IRQ, not FIQ */
	ICLR = 0;

	/* clear all GPIO edge detects */
	GFER = 0;
	GRER = 0;
	GEDR = -1;

	/*
	 * Whatever the doc says, this has to be set for the wait-on-irq
	 * instruction to work... on a SA1100 rev 9 at least.
	 */
	ICCR = 1;

	sa1100_low_gpio_irqdomain = irq_domain_add_legacy(NULL,
			11, IRQ_GPIO0, 0,
			&sa1100_low_gpio_irqdomain_ops, NULL);

	sa1100_normal_irqdomain = irq_domain_add_legacy(NULL,
			21, IRQ_GPIO11_27, 11,
			&sa1100_normal_irqdomain_ops, NULL);

	sa1100_high_gpio_irqdomain = irq_domain_add_legacy(NULL,
			17, IRQ_GPIO11, 11,
			&sa1100_high_gpio_irqdomain_ops, NULL);

	/*
	 * Install handler for GPIO 11-27 edge detect interrupts
	 */
	irq_set_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler);

	set_handle_irq(sa1100_handle_irq);

	sa1100_init_gpio();
}
Example #7
static void __init __mips_cpu_irq_init(struct device_node *of_node)
{
	struct irq_domain *domain;

	/* Mask interrupts. */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
				       &mips_cpu_intc_irq_domain_ops, NULL);
	if (!domain)
		panic("Failed to add irqdomain for MIPS CPU");
}
Example #8
void __init mmp_dt_irq_init(void)
{
	struct device_node *node;
	const struct of_device_id *of_id;
	struct mmp_intc_conf *conf;
	int nr_irqs, irq_base, ret, irq;

	node = of_find_matching_node(NULL, intc_ids);
	if (!node) {
		pr_err("Failed to find interrupt controller in arch-mmp\n");
		return;
	}
	of_id = of_match_node(intc_ids, node);
	conf = of_id->data;

	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
	if (ret) {
		pr_err("Not found mrvl,intc-nr-irqs property\n");
		return;
	}

	mmp_icu_base = of_iomap(node, 0);
	if (!mmp_icu_base) {
		pr_err("Failed to get interrupt controller register\n");
		return;
	}

	irq_base = irq_alloc_descs(-1, 0, nr_irqs - NR_IRQS_LEGACY, 0);
	if (irq_base < 0) {
		pr_err("Failed to allocate IRQ numbers\n");
		goto err;
	} else if (irq_base != NR_IRQS_LEGACY) {
		pr_err("ICU's irqbase should be started from 0\n");
		goto err;
	}
	icu_data[0].conf_enable = conf->conf_enable;
	icu_data[0].conf_disable = conf->conf_disable;
	icu_data[0].conf_mask = conf->conf_mask;
	icu_data[0].nr_irqs = nr_irqs;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(node, nr_irqs, 0, 0,
						   &mmp_irq_domain_ops,
						   &icu_data[0]);
	irq_set_default_host(icu_data[0].domain);
	for (irq = 0; irq < nr_irqs; irq++)
		icu_mask_irq(irq_get_irq_data(irq));
	mmp2_mux_init(node);
	return;
err:
	iounmap(mmp_icu_base);
}
Example #9
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq, irq_base;
	unsigned int nr_irq, soc_max_nr;

	soc_max_nr = (soc_is_exynos5250() || soc_is_exynos542x())
			? EXYNOS5_MAX_COMBINER_NR : EXYNOS4_MAX_COMBINER_NR;

	if (np) {
		if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
			pr_warning("%s: number of combiners not specified, "
				"setting default as %d.\n",
				__func__, EXYNOS4_MAX_COMBINER_NR);
			max_nr = EXYNOS4_MAX_COMBINER_NR;
		}
	} else {
		max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
						EXYNOS4_MAX_COMBINER_NR;
	}
	nr_irq = max_nr * MAX_IRQ_IN_COMBINER;

	irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
	if (IS_ERR_VALUE(irq_base)) {
		irq_base = COMBINER_IRQ(0, 0);
		pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
	}

	combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
				&combiner_irq_domain_ops, &combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warning("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
		irq = IRQ_SPI(i);
#ifdef CONFIG_OF
		if (np)
			irq = irq_of_parse_and_map(np, i);
#endif
		combiner_cascade_irq(i, irq);
	}

#ifdef CONFIG_PM
	/* Setup suspend/resume combiner saving */
	cpu_pm_register_notifier(&combiner_notifier_block);
#endif
}
Example #10
void __init armctrl_dt_init(void)
{
	struct device_node *np;
	struct irq_domain *domain;

	np = of_find_compatible_node(NULL, NULL, "brcm,bcm2708-armctrl-ic");
	if (!np)
		return;

	domain = irq_domain_add_legacy(np, BCM2708_ALLOC_IRQS,
					IRQ_ARMCTRL_START, 0,
					&armctrl_ops, NULL);
	WARN_ON(!domain);
}
Example #11
/*
 * irqdomain initialization: pile up irqdomains on top of AIC range
 */
static void __init at91_gpio_irqdomain(struct at91_gpio_chip *at91_gpio)
{
	int irq_base;

	irq_base = irq_alloc_descs(-1, 0, at91_gpio->chip.ngpio, 0);
	if (irq_base < 0)
		panic("at91_gpio.%d: error %d: couldn't allocate IRQ numbers.\n",
			at91_gpio->pioc_idx, irq_base);
	at91_gpio->domain = irq_domain_add_legacy(NULL, at91_gpio->chip.ngpio,
						  irq_base, 0,
						  &irq_domain_simple_ops, NULL);
	if (!at91_gpio->domain)
		panic("at91_gpio.%d: couldn't allocate irq domain.\n",
			at91_gpio->pioc_idx);
}
Example #12
static int __init intc_of_init(struct device_node *node,
			       struct device_node *parent)
{
	struct resource res;
	struct irq_domain *domain;
	int irq;

	if (!of_property_read_u32_array(node, "ralink,intc-registers",
					rt_intc_regs, 6))
		pr_info("intc: using register map from devicetree\n");

	irq = irq_of_parse_and_map(node, 0);
	if (!irq)
		panic("Failed to get INTC IRQ");

	if (of_address_to_resource(node, 0, &res))
		panic("Failed to get intc memory range");

	if (!request_mem_region(res.start, resource_size(&res),
				res.name))
		pr_err("Failed to request intc memory");

	rt_intc_membase = ioremap_nocache(res.start,
					resource_size(&res));
	if (!rt_intc_membase)
		panic("Failed to remap intc memory");

	/* disable all interrupts */
	rt_intc_w32(~0, INTC_REG_DISABLE);

	/* route all INTC interrupts to MIPS HW0 interrupt */
	rt_intc_w32(0, INTC_REG_TYPE);

	domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT,
			RALINK_INTC_IRQ_BASE, 0, &irq_domain_ops, NULL);
	if (!domain)
		panic("Failed to add irqdomain");

	rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE);

	irq_set_chained_handler(irq, ralink_intc_irq_handler);
	irq_set_handler_data(irq, domain);

	/* tell the kernel which irq is used for performance monitoring */
	rt_perfcount_irq = irq_create_mapping(domain, 9);

	return 0;
}
Example #13
static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
		struct device_node *np)
{
	int i, parent_irq, virq_base, hwirq = 0, nr_irqs = 0;
	struct irq_domain *shirq_domain;
	void __iomem *base;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%s: failed to map shirq registers\n", __func__);
		return -ENXIO;
	}

	for (i = 0; i < block_nr; i++)
		nr_irqs += shirq_blocks[i]->nr_irqs;

	virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
	if (IS_ERR_VALUE(virq_base)) {
		pr_err("%s: irq desc alloc failed\n", __func__);
		goto err_unmap;
	}

	shirq_domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0,
			&irq_domain_simple_ops, NULL);
	if (WARN_ON(!shirq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		goto err_free_desc;
	}

	for (i = 0; i < block_nr; i++) {
		shirq_blocks[i]->base = base;
		shirq_blocks[i]->virq_base = irq_find_mapping(shirq_domain,
				hwirq);

		parent_irq = irq_of_parse_and_map(np, i);
		spear_shirq_register(shirq_blocks[i], parent_irq);
		hwirq += shirq_blocks[i]->nr_irqs;
	}

	return 0;

err_free_desc:
	irq_free_descs(virq_base, nr_irqs);
err_unmap:
	iounmap(base);
	return -ENXIO;
}
Example #14
static int __init
init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
{
	if (parent)
		panic("DeviceTree incore intc not a root irq controller\n");

	root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
					    &arcv2_irq_ops, NULL);

	if (!root_domain)
		panic("root irq domain not avail\n");

	/* with this we don't need to export root_domain */
	irq_set_default_host(root_domain);

	return 0;
}
Example #15
void __init sirfsoc_of_irq_init(void)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, intc_ids);
	if (!np)
		panic("unable to find compatible intc node in dtb\n");

	sirfsoc_intc_base = of_iomap(np, 0);
	if (!sirfsoc_intc_base)
		panic("unable to map intc cpu registers\n");

	irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL);

	of_node_put(np);

	sirfsoc_irq_init();
}
Example #16
int __init vt8500_irq_init(struct device_node *node, struct device_node *parent)
{
    struct irq_domain *vt8500_irq_domain;
    struct vt8500_irq_priv *priv;
    int irq, i;
    struct device_node *np = node;

    priv = kzalloc(sizeof(struct vt8500_irq_priv), GFP_KERNEL);
    if (!priv)
        return -ENOMEM;

    priv->base = of_iomap(np, 0);

    vt8500_irq_domain = irq_domain_add_legacy(node, 64, irq_cnt, 0,
                        &vt8500_irq_domain_ops, priv);
    if (!vt8500_irq_domain)
        pr_err("%s: Unable to add wmt irq domain!\n", __func__);

    irq_set_default_host(vt8500_irq_domain);

    vt8500_init_irq_hw(priv->base);

    pr_info("Added IRQ Controller @ %x [virq_base = %d]\n",
            (u32)(priv->base), irq_cnt);

    /* check if this is a slaved controller */
    if (of_irq_count(np) != 0) {
        /* check that we have the correct number of interrupts */
        if (of_irq_count(np) != 8) {
            pr_err("%s: Incorrect IRQ map for slave controller\n",
                   __func__);
            return -EINVAL;
        }

        for (i = 0; i < 8; i++) {
            irq = irq_of_parse_and_map(np, i);
            enable_irq(irq);
        }

        pr_info("vt8500-irq: Enabled slave->parent interrupts\n");
    }

    irq_cnt += 64;

    return 0;
}
Example #17
/*
 * Initialize the AIC interrupt controller.
 */
void __init at91_aic_init(unsigned int priority[NR_AIC_IRQS])
{
	unsigned int i;
	int irq_base;

	at91_aic_base = ioremap(AT91_AIC, 512);
	if (!at91_aic_base)
		panic("Unable to ioremap AIC registers\n");

	/* Add irq domain for AIC */
	irq_base = irq_alloc_descs(-1, 0, NR_AIC_IRQS, 0);
	if (irq_base < 0) {
		WARN(1, "Cannot allocate irq_descs, assuming pre-allocated\n");
		irq_base = 0;
	}
	at91_aic_domain = irq_domain_add_legacy(at91_aic_np, NR_AIC_IRQS,
						irq_base, 0,
						&irq_domain_simple_ops, NULL);

	if (!at91_aic_domain)
		panic("Unable to add AIC irq domain\n");

	irq_set_default_host(at91_aic_domain);

	/*
	 * The IVR is used by macro get_irqnr_and_base to read and verify.
	 * The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
	 */
	for (i = 0; i < NR_AIC_IRQS; i++) {
		/* Put hardware irq number in Source Vector Register: */
		at91_aic_write(AT91_AIC_SVR(i), i);
		/* Active Low interrupt, with the specified priority */
		at91_aic_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);

		irq_set_chip_and_handler(i, &at91_aic_chip, handle_level_irq);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	at91_aic_hw_init(NR_AIC_IRQS);
}
Example #18
static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np)
{
	int base = tc3589x->irq_base;

	if (base) {
		tc3589x->domain = irq_domain_add_legacy(
			NULL, TC3589x_NR_INTERNAL_IRQS, base,
			0, &tc3589x_irq_ops, tc3589x);
	} else {
		tc3589x->domain = irq_domain_add_linear(
			np, TC3589x_NR_INTERNAL_IRQS,
			&tc3589x_irq_ops, tc3589x);
	}

	if (!tc3589x->domain) {
		dev_err(tc3589x->dev, "Failed to create irqdomain\n");
		return -ENOSYS;
	}

	return 0;
}
Example #19
static int __init orion_add_irq_domain(struct device_node *np,
				       struct device_node *interrupt_parent)
{
	int i = 0, irq_gpio;
	void __iomem *base;

	do {
		base = of_iomap(np, i);
		if (base) {
			orion_irq_init(i * 32, base);
			i++;
		}
	} while (base);

	irq_domain_add_legacy(np, i * 32, 0, 0,
			      &irq_domain_simple_ops, NULL);

	irq_gpio = i * 32;
	orion_gpio_of_init(irq_gpio);

	return 0;
}
Example #20
/* MMP (ARMv5) */
void __init icu_init_irq(void)
{
	int irq;

	max_icu_nr = 1;
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp_conf.conf_enable;
	icu_data[0].conf_disable = mmp_conf.conf_disable;
	icu_data[0].conf_mask = mmp_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
	for (irq = 0; irq < 64; irq++) {
		icu_mask_irq(irq_get_irq_data(irq));
		irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
	irq_set_default_host(icu_data[0].domain);
#ifdef CONFIG_CPU_PXA910
	icu_irq_chip.irq_set_wake = pxa910_set_wake;
#endif
}
Example #21
/**
 * xaxipcie_alloc_msi_irqdescs - allocate msi irq descs
 * @node: Pointer to device node structure
 * @msg_addr: PCIe MSI message address
 *
 * @return: Allocated MSI IRQ Base/ error
 *
 * @note: This function is called when xaxipcie_init_port() is called
 */
int xaxipcie_alloc_msi_irqdescs(struct device_node *node,
					unsigned long msg_addr)
{
	/* Store the PCIe MSI message address */
	xaxipcie_msg_addr = msg_addr;

	/* Allocate MSI IRQ descriptors */
	xaxipcie_msi_irq_base = irq_alloc_descs(-1, 0,
					XILINX_NUM_MSI_IRQS, 0);

	if (xaxipcie_msi_irq_base < 0)
		return -ENODEV;

	/* Register IRQ domain */
	xaxipcie_irq_domain = irq_domain_add_legacy(node,
				XILINX_NUM_MSI_IRQS,
				xaxipcie_msi_irq_base,
				0, &irq_domain_simple_ops, NULL);

	if (!xaxipcie_irq_domain)
		return -ENOMEM;

	return xaxipcie_msi_irq_base;
}
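A minimal caller sketch for the function above, assuming the port-setup code already knows the bridge's device node and the physical MSI message address it programmed; pcie_node and msi_phys are illustrative names, not part of the original driver:

	int msi_irq_base;

	msi_irq_base = xaxipcie_alloc_msi_irqdescs(pcie_node, msi_phys);
	if (msi_irq_base < 0)
		pr_err("axi-pcie: could not allocate MSI IRQ descriptors (%d)\n",
		       msi_irq_base);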
Example #22
static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
{
	int j, irq_base;

	omap_irq_base = ioremap(base, SZ_4K);
	if (WARN_ON(!omap_irq_base))
		return -ENOMEM;

	irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
	if (irq_base < 0) {
		pr_warn("Couldn't allocate IRQ numbers\n");
		irq_base = 0;
	}

	domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
			&irq_domain_simple_ops, NULL);

	omap_irq_soft_reset();

	for (j = 0; j < omap_nr_irqs; j += 32)
		omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32);

	return 0;
}
Example #23
static int davinci_gpio_irq_setup(struct platform_device *pdev)
{
	unsigned	gpio, bank;
	int		irq;
	struct clk	*clk;
	u32		binten = 0;
	unsigned	ngpio, bank_irq;
	struct device *dev = &pdev->dev;
	struct resource	*res;
	struct davinci_gpio_controller *chips = platform_get_drvdata(pdev);
	struct davinci_gpio_platform_data *pdata = dev->platform_data;
	struct davinci_gpio_regs __iomem *g;
	struct irq_domain	*irq_domain = NULL;
	const struct of_device_id *match;
	struct irq_chip *irq_chip;
	gpio_get_irq_chip_cb_t gpio_get_irq_chip;

	/*
	 * Use davinci_gpio_get_irq_chip by default to handle non DT cases
	 */
	gpio_get_irq_chip = davinci_gpio_get_irq_chip;
	match = of_match_device(of_match_ptr(davinci_gpio_ids),
				dev);
	if (match)
		gpio_get_irq_chip = (gpio_get_irq_chip_cb_t)match->data;

	ngpio = pdata->ngpio;
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "Invalid IRQ resource\n");
		return -EBUSY;
	}

	bank_irq = res->start;

	if (!bank_irq) {
		dev_err(dev, "Invalid IRQ resource\n");
		return -ENODEV;
	}

	clk = devm_clk_get(dev, "gpio");
	if (IS_ERR(clk)) {
		printk(KERN_ERR "Error %ld getting gpio clock?\n",
		       PTR_ERR(clk));
		return PTR_ERR(clk);
	}
	clk_prepare_enable(clk);

	if (!pdata->gpio_unbanked) {
		irq = irq_alloc_descs(-1, 0, ngpio, 0);
		if (irq < 0) {
			dev_err(dev, "Couldn't allocate IRQ numbers\n");
			return irq;
		}

		irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
							&davinci_gpio_irq_ops,
							chips);
		if (!irq_domain) {
			dev_err(dev, "Couldn't register an IRQ domain\n");
			return -ENODEV;
		}
	}

	/*
	 * Arrange gpio_to_irq() support, handling either direct IRQs or
	 * banked IRQs.  Having GPIOs in the first GPIO bank use direct
	 * IRQs, while the others use banked IRQs, would need some setup
	 * tweaks to recognize hardware which can do that.
	 */
	for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) {
		chips[bank].chip.to_irq = gpio_to_irq_banked;
		chips[bank].irq_domain = irq_domain;
	}

	/*
	 * AINTC can handle direct/unbanked IRQs for GPIOs, with the GPIO
	 * controller only handling trigger modes.  We currently assume no
	 * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs.
	 */
	if (pdata->gpio_unbanked) {
		/* pass "bank 0" GPIO IRQs to AINTC */
		chips[0].chip.to_irq = gpio_to_irq_unbanked;
		chips[0].gpio_irq = bank_irq;
		chips[0].gpio_unbanked = pdata->gpio_unbanked;
		binten = GENMASK(pdata->gpio_unbanked / 16, 0);

		/* AINTC handles mask/unmask; GPIO handles triggering */
		irq = bank_irq;
		irq_chip = gpio_get_irq_chip(irq);
		irq_chip->name = "GPIO-AINTC";
		irq_chip->irq_set_type = gpio_irq_type_unbanked;

		/* default trigger: both edges */
		g = gpio2regs(0);
		writel_relaxed(~0, &g->set_falling);
		writel_relaxed(~0, &g->set_rising);

		/* set the direct IRQs up to use that irqchip */
		for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) {
			irq_set_chip(irq, irq_chip);
			irq_set_handler_data(irq, &chips[gpio / 32]);
			irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
		}

		goto done;
	}

	/*
	 * Or, AINTC can handle IRQs for banks of 16 GPIO IRQs, which we
	 * then chain through our own handler.
	 */
	for (gpio = 0, bank = 0; gpio < ngpio; bank++, bank_irq++, gpio += 16) {
		/* disabled by default, enabled only as needed */
		g = gpio2regs(gpio);
		writel_relaxed(~0, &g->clr_falling);
		writel_relaxed(~0, &g->clr_rising);

		/*
		 * Each chip handles 32 gpios, and each irq bank consists of 16
		 * gpio irqs. Pass the irq bank's corresponding controller to
		 * the chained irq handler.
		 */
		irq_set_chained_handler_and_data(bank_irq, gpio_irq_handler,
						 &chips[gpio / 32]);

		binten |= BIT(bank);
	}

done:
	/*
	 * BINTEN -- per-bank interrupt enable. genirq would also let these
	 * bits be set/cleared dynamically.
	 */
	writel_relaxed(binten, gpio_base + BINTEN);

	return 0;
}
Example #24
static int bq51221_charger_probe(
						struct i2c_client *client,
						const struct i2c_device_id *id)
{
	struct device_node *of_node = client->dev.of_node;
	struct bq51221_charger_data *charger;
	bq51221_charger_platform_data_t *pdata = client->dev.platform_data;
	int ret = 0;

	dev_info(&client->dev,
		"%s: bq51221 Charger Driver Loading\n", __func__);

	if (of_node) {
		pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata) {
			dev_err(&client->dev, "Failed to allocate memory\n");
			return -ENOMEM;
		}
		ret = bq51221_chg_parse_dt(&client->dev, pdata);
		if (ret < 0)
			goto err_parse_dt;
	} else {
		pdata = client->dev.platform_data;
	}

	charger = kzalloc(sizeof(*charger), GFP_KERNEL);
	if (charger == NULL) {
		dev_err(&client->dev, "Memory is not enough.\n");
		ret = -ENOMEM;
		goto err_wpc_nomem;
	}
	charger->dev = &client->dev;

	ret = i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
		I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_I2C_BLOCK);
	if (!ret) {
		ret = i2c_get_functionality(client->adapter);
		dev_err(charger->dev, "I2C functionality is not supported.\n");
		ret = -ENOSYS;
		goto err_i2cfunc_not_support;
	}

	charger->client = client;
	charger->pdata = pdata;

    pr_info("%s: %s\n", __func__, charger->pdata->wireless_charger_name );

	/* if board-init had already assigned irq_base (>=0) ,
	no need to allocate it;
	assign -1 to let this driver allocate resource by itself*/
#if 0 /* this part is for bq51221s */
    if (pdata->irq_base < 0)
        pdata->irq_base = irq_alloc_descs(-1, 0, BQ51221_EVENT_IRQ, 0);
	if (pdata->irq_base < 0) {
		pr_err("%s: irq_alloc_descs Fail! ret(%d)\n",
				__func__, pdata->irq_base);
		ret = -EINVAL;
		goto irq_base_err;
	} else {
		charger->irq_base = pdata->irq_base;
		pr_info("%s: irq_base = %d\n",
			 __func__, charger->irq_base);

#if (LINUX_VERSION_CODE>=KERNEL_VERSION(3,4,0))
		irq_domain_add_legacy(of_node, BQ51221_EVENT_IRQ, charger->irq_base, 0,
				      &irq_domain_simple_ops, NULL);
#endif /*(LINUX_VERSION_CODE>=KERNEL_VERSION(3,4,0))*/
	}
#endif
	i2c_set_clientdata(client, charger);

	charger->psy_chg.name		= pdata->wireless_charger_name;
	charger->psy_chg.type		= POWER_SUPPLY_TYPE_UNKNOWN;
	charger->psy_chg.get_property	= bq51221_chg_get_property;
	charger->psy_chg.set_property	= bq51221_chg_set_property;
	charger->psy_chg.properties	= sec_charger_props;
	charger->psy_chg.num_properties	= ARRAY_SIZE(sec_charger_props);

	mutex_init(&charger->io_lock);

#if 0 /* this part is for bq51221s */

	if (charger->chg_irq) {
		INIT_DELAYED_WORK(
			&charger->isr_work, bq51221_chg_isr_work);

		ret = request_threaded_irq(charger->chg_irq,
				NULL, bq51221_chg_irq_thread,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"charger-irq", charger);
		if (ret) {
			dev_err(&client->dev,
				"%s: Failed to Reqeust IRQ\n", __func__);
			goto err_supply_unreg;
		}

		ret = enable_irq_wake(charger->chg_irq);
		if (ret < 0)
			dev_err(&client->dev,
				"%s: Failed to Enable Wakeup Source(%d)\n",
				__func__, ret);
	}
#endif
	charger->pdata->cs100_status = 0;
	charger->pdata->pad_mode = BQ51221_PAD_MODE_NONE;

	ret = power_supply_register(&client->dev, &charger->psy_chg);
	if (ret) {
		dev_err(&client->dev,
			"%s: Failed to Register psy_chg\n", __func__);
		goto err_supply_unreg;
	}

	charger->wqueue = create_workqueue("bq51221_workqueue");
	if (!charger->wqueue) {
		pr_err("%s: Fail to Create Workqueue\n", __func__);
		goto err_pdata_free;
	}

	wake_lock_init(&(charger->wpc_wake_lock), WAKE_LOCK_SUSPEND,
			"wpc_wakelock");
	INIT_DELAYED_WORK(&charger->wpc_work, bq51221_detect_work);

	dev_info(&client->dev,
		"%s: bq51221 Charger Driver Loaded\n", __func__);

	return 0;

err_pdata_free:
	power_supply_unregister(&charger->psy_chg);
err_supply_unreg:
	mutex_destroy(&charger->io_lock);
err_i2cfunc_not_support:
	kfree(charger);
err_wpc_nomem:
err_parse_dt:
	kfree(pdata);
	return ret;
}
Example #25
static struct s3c_irq_intc * __init s3c24xx_init_intc(struct device_node *np,
				       struct s3c_irq_data *irq_data,
				       struct s3c_irq_intc *parent,
				       unsigned long address)
{
	struct s3c_irq_intc *intc;
	void __iomem *base = (void *)0xf6000000; /* static mapping */
	int irq_num;
	int irq_start;
	int ret;

	intc = kzalloc(sizeof(struct s3c_irq_intc), GFP_KERNEL);
	if (!intc)
		return ERR_PTR(-ENOMEM);

	intc->irqs = irq_data;

	if (parent)
		intc->parent = parent;

	/* select the correct data for the controller.
	 * Need to hard code the irq num start and offset
	 * to preserve the static mapping for now
	 */
	switch (address) {
	case 0x4a000000:
		pr_debug("irq: found main intc\n");
		intc->reg_pending = base;
		intc->reg_mask = base + 0x08;
		intc->reg_intpnd = base + 0x10;
		irq_num = 32;
		irq_start = S3C2410_IRQ(0);
		break;
	case 0x4a000018:
		pr_debug("irq: found subintc\n");
		intc->reg_pending = base + 0x18;
		intc->reg_mask = base + 0x1c;
		irq_num = 29;
		irq_start = S3C2410_IRQSUB(0);
		break;
	case 0x4a000040:
		pr_debug("irq: found intc2\n");
		intc->reg_pending = base + 0x40;
		intc->reg_mask = base + 0x48;
		intc->reg_intpnd = base + 0x50;
		irq_num = 8;
		irq_start = S3C2416_IRQ(0);
		break;
	case 0x560000a4:
		pr_debug("irq: found eintc\n");
		base = (void *)0xfd000000;

		intc->reg_mask = base + 0xa4;
		intc->reg_pending = base + 0xa8;
		irq_num = 24;
		irq_start = S3C2410_IRQ(32);
		break;
	default:
		pr_err("irq: unsupported controller address\n");
		ret = -EINVAL;
		goto err;
	}

	/* now that all the data is complete, init the irq-domain */
	s3c24xx_clear_intc(intc);
	intc->domain = irq_domain_add_legacy(np, irq_num, irq_start,
					     0, &s3c24xx_irq_ops,
					     intc);
	if (!intc->domain) {
		pr_err("irq: could not create irq-domain\n");
		ret = -EINVAL;
		goto err;
	}

	set_handle_irq(s3c24xx_handle_irq);

	return intc;

err:
	kfree(intc);
	return ERR_PTR(ret);
}
Example #26
/**
 * xgpiops_probe - Initialization method for a xgpiops device
 * @pdev:	platform device instance
 *
 * This function allocates memory resources for the gpio device and registers
 * all the banks of the device. It will also set up interrupts for the gpio
 * pins.
 * Note: Interrupts are disabled for all the banks during initialization.
 * Returns 0 on success, negative error otherwise.
 */
static int xgpiops_probe(struct platform_device *pdev)
{
	int ret;
	unsigned int irq_num;
	struct xgpiops *gpio;
	struct gpio_chip *chip;
	resource_size_t remap_size;
	struct resource *mem_res = NULL;
	int pin_num, bank_num, gpio_irq;

	gpio = kzalloc(sizeof(struct xgpiops), GFP_KERNEL);
	if (!gpio) {
		dev_err(&pdev->dev,
			"couldn't allocate memory for gpio private data\n");
		return -ENOMEM;
	}

	spin_lock_init(&gpio->gpio_lock);

	platform_set_drvdata(pdev, gpio);

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		dev_err(&pdev->dev, "No memory resource\n");
		ret = -ENODEV;
		goto err_free_gpio;
	}

	remap_size = mem_res->end - mem_res->start + 1;
	if (!request_mem_region(mem_res->start, remap_size, pdev->name)) {
		dev_err(&pdev->dev, "Cannot request IO\n");
		ret = -ENXIO;
		goto err_free_gpio;
	}

	gpio->base_addr = ioremap(mem_res->start, remap_size);
	if (gpio->base_addr == NULL) {
		dev_err(&pdev->dev, "Couldn't ioremap memory at 0x%08lx\n",
			(unsigned long)mem_res->start);
		ret = -ENOMEM;
		goto err_release_region;
	}

	irq_num = platform_get_irq(pdev, 0);
	gpio->irq = irq_num;

	/* configure the gpio chip */
	chip = &gpio->chip;
	chip->label = "xgpiops";
	chip->owner = THIS_MODULE;
	chip->dev = &pdev->dev;
	chip->get = xgpiops_get_value;
	chip->set = xgpiops_set_value;
	chip->request = xgpiops_request;
	chip->free = xgpiops_free;
	chip->direction_input = xgpiops_dir_in;
	chip->direction_output = xgpiops_dir_out;
	chip->to_irq = xgpiops_to_irq;
	chip->dbg_show = NULL;
	chip->base = 0;		/* default pin base */
	chip->ngpio = XGPIOPS_NR_GPIOS;
	chip->can_sleep = 0;

	gpio->irq_base = irq_alloc_descs(-1, 0, chip->ngpio, 0);
	if (gpio->irq_base < 0) {
		dev_err(&pdev->dev, "Couldn't allocate IRQ numbers\n");
		ret = -ENODEV;
		goto err_iounmap;
	}

	irq_domain = irq_domain_add_legacy(pdev->dev.of_node,
					   chip->ngpio, gpio->irq_base, 0,
					   &irq_domain_simple_ops, NULL);

	/* report a bug if gpio chip registration fails */
	ret = gpiochip_add(chip);
	if (ret < 0) {
		dev_err(&pdev->dev, "gpio chip registration failed\n");
		goto err_iounmap;
	} else {
		dev_info(&pdev->dev, "gpio at 0x%08lx mapped to 0x%08lx\n",
			 (unsigned long)mem_res->start,
			 (unsigned long)gpio->base_addr);
	}

	/* Enable GPIO clock */
	gpio->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(gpio->clk)) {
		dev_err(&pdev->dev, "input clock not found.\n");
		ret = PTR_ERR(gpio->clk);
		goto err_chip_remove;
	}
	ret = clk_prepare_enable(gpio->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock.\n");
		goto err_clk_put;
	}

	/* disable interrupts for all banks */
	for (bank_num = 0; bank_num < 4; bank_num++) {
		xgpiops_writereg(0xffffffff, gpio->base_addr +
				  XGPIOPS_INTDIS_OFFSET(bank_num));
	}

	/*
	 * set the irq chip, handler and irq chip data for callbacks for
	 * each pin
	 */
	for (pin_num = 0; pin_num < min_t(int, XGPIOPS_NR_GPIOS,
						(int)chip->ngpio); pin_num++) {
		gpio_irq = irq_find_mapping(irq_domain, pin_num);
		irq_set_chip_and_handler(gpio_irq, &xgpiops_irqchip,
							handle_simple_irq);
		irq_set_chip_data(gpio_irq, (void *)gpio);
		set_irq_flags(gpio_irq, IRQF_VALID);
	}

	irq_set_handler_data(irq_num, (void *)gpio);
	irq_set_chained_handler(irq_num, xgpiops_irqhandler);

	xgpiops_pm_runtime_init(pdev);

	device_set_wakeup_capable(&pdev->dev, 1);

	return 0;

err_clk_put:
	clk_put(gpio->clk);
err_chip_remove:
	gpiochip_remove(chip);
err_iounmap:
	iounmap(gpio->base_addr);
err_release_region:
	release_mem_region(mem_res->start, remap_size);
err_free_gpio:
	platform_set_drvdata(pdev, NULL);
	kfree(gpio);

	return ret;
}
Example #27
static int __init goldfish_pic_of_init(struct device_node *of_node,
				       struct device_node *parent)
{
	struct goldfish_pic_data *gfpic;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	unsigned int parent_irq;
	int ret = 0;

	gfpic = kzalloc(sizeof(*gfpic), GFP_KERNEL);
	if (!gfpic) {
		ret = -ENOMEM;
		goto out_err;
	}

	parent_irq = irq_of_parse_and_map(of_node, 0);
	if (!parent_irq) {
		pr_err("Failed to map parent IRQ!\n");
		ret = -EINVAL;
		goto out_free;
	}

	gfpic->base = of_iomap(of_node, 0);
	if (!gfpic->base) {
		pr_err("Failed to map base address!\n");
		ret = -ENOMEM;
		goto out_unmap_irq;
	}

	/* Mask interrupts. */
	writel(1, gfpic->base + GFPIC_REG_IRQ_DISABLE_ALL);

	gc = irq_alloc_generic_chip("GFPIC", 1, GFPIC_IRQ_BASE, gfpic->base,
				    handle_level_irq);
	if (!gc) {
		pr_err("Failed to allocate chip structures!\n");
		ret = -ENOMEM;
		goto out_iounmap;
	}

	ct = gc->chip_types;
	ct->regs.enable = GFPIC_REG_IRQ_ENABLE;
	ct->regs.disable = GFPIC_REG_IRQ_DISABLE;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.irq_mask = irq_gc_mask_disable_reg;

	irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
			       IRQ_NOPROBE | IRQ_LEVEL, 0);

	gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
						  GFPIC_IRQ_BASE, 0,
						  &goldfish_irq_domain_ops,
						  NULL);
	if (!gfpic->irq_domain) {
		pr_err("Failed to add irqdomain!\n");
		ret = -ENOMEM;
		goto out_destroy_generic_chip;
	}

	irq_set_chained_handler_and_data(parent_irq,
					 goldfish_pic_cascade, gfpic);

	pr_info("Successfully registered.\n");
	return 0;

out_destroy_generic_chip:
	irq_destroy_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS),
				 IRQ_NOPROBE | IRQ_LEVEL, 0);
out_iounmap:
	iounmap(gfpic->base);
out_unmap_irq:
	irq_dispose_mapping(parent_irq);
out_free:
	kfree(gfpic);
out_err:
	pr_err("Failed to initialize! (errno = %d)\n", ret);
	return ret;
}
Example #28
static int __init imx53_tzic_add_irq_domain(struct device_node *np,
				struct device_node *interrupt_parent)
{
	irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
	return 0;
}
Example #29
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate at specific IRQ number if irq_base > 0.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	if (chip->num_type_reg) {
		d->type_buf_def = kcalloc(chip->num_type_reg,
					sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (chip->num_type_reg) {
		for (i = 0; i < chip->num_irqs; i++) {
			reg = chip->irqs[i].type_reg_offset / map->reg_stride;
			d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
					chip->irqs[i].type_falling_mask;
		}
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;

			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (chip->type_invert)
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0xFF);
			else
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0x0);
			if (ret != 0) {
				dev_err(map->dev,
					"Failed to set type in 0x%x: %x\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
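The function above selects a legacy IRQ domain when irq_base is non-zero and a linear domain otherwise. Below is a minimal caller sketch assuming a hypothetical I2C device with one status/mask/ack register set; struct my_chip, the my_chip_* names and the register addresses 0x10-0x12 are illustrative only, not taken from any real driver:

/* Hypothetical interrupt sources; the offsets and masks are placeholders. */
static const struct regmap_irq my_chip_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },	/* e.g. an "alarm" source */
	{ .reg_offset = 0, .mask = BIT(1) },	/* e.g. a "ready" source */
};

static const struct regmap_irq_chip my_chip_irq_chip = {
	.name		= "my-chip",
	.status_base	= 0x10,		/* placeholder register addresses */
	.mask_base	= 0x11,
	.ack_base	= 0x12,
	.num_regs	= 1,
	.irqs		= my_chip_irqs,
	.num_irqs	= ARRAY_SIZE(my_chip_irqs),
};

struct my_chip {			/* hypothetical driver data */
	struct regmap *regmap;
	struct regmap_irq_chip_data *irq_data;
};

static int my_chip_add_irqs(struct my_chip *my_chip, struct i2c_client *client)
{
	/*
	 * irq_base = 0 requests a linear domain; a positive irq_base would
	 * instead allocate descriptors at that base and use a legacy domain.
	 */
	return regmap_add_irq_chip(my_chip->regmap, client->irq,
				   IRQF_TRIGGER_LOW, 0,
				   &my_chip_irq_chip, &my_chip->irq_data);
}

Consumers would then typically resolve each source to its virtual IRQ with regmap_irq_get_virq() before requesting it.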
Example #30
static int s2mu003_mfd_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	int ret = 0;
	u8 data = 0;
	struct device_node *of_node = i2c->dev.of_node;
	s2mu003_mfd_chip_t *chip;
	s2mu003_mfd_platform_data_t *pdata = i2c->dev.platform_data;

	pr_info("%s : S2MU003 MFD Driver start probe\n", __func__);
	if (of_node) {
		pdata = devm_kzalloc(&i2c->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata) {
			dev_err(&i2c->dev, "Failed to allocate memory\n");
			ret = -ENOMEM;
			goto err_dt_nomem;
		}
		ret = s2mu003mfd_parse_dt(&i2c->dev, pdata);
		if (ret < 0)
			goto err_parse_dt;
	} else {
		pdata = i2c->dev.platform_data;
	}

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		dev_err(&i2c->dev, "Memory is not enough.\n");
		ret = -ENOMEM;
		goto err_mfd_nomem;
	}
	chip->dev = &i2c->dev;

	ret = i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
			I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_I2C_BLOCK);
	if (!ret) {
		ret = i2c_get_functionality(i2c->adapter);
		dev_err(chip->dev, "I2C functionality is not supported.\n");
		ret = -ENOSYS;
		goto err_i2cfunc_not_support;
	}

	chip->i2c_client = i2c;
	chip->pdata = pdata;

	/*
	 * If board init has already assigned irq_base (>= 0), there is no
	 * need to allocate it; assign -1 to let this driver allocate the
	 * resource by itself.
	 */
	if (pdata->irq_base < 0)
		pdata->irq_base = irq_alloc_descs(-1, 0, S2MU003_IRQS_NR, 0);
	if (pdata->irq_base < 0) {
		pr_err("%s:%s irq_alloc_descs Fail! ret(%d)\n",
				"s2mu003-mfd", __func__, pdata->irq_base);
		ret = -EINVAL;
		goto irq_base_err;
	} else {
		chip->irq_base = pdata->irq_base;
		pr_info("%s:%s irq_base = %d\n",
				"s2mu003-mfd", __func__, chip->irq_base);
		irq_domain_add_legacy(of_node, S2MU003_IRQS_NR, chip->irq_base, 0,
				      &irq_domain_simple_ops, NULL);
	}

	i2c_set_clientdata(i2c, chip);
	mutex_init(&chip->io_lock);

	wake_lock_init(&(chip->irq_wake_lock), WAKE_LOCK_SUSPEND,
			"s2mu003mfd_wakelock");

	/* To disable MRST function should be
	finished before set any reg init-value*/
	data = s2mu003_reg_read(i2c, 0x47);
	pr_info("%s : Manual Reset Data = 0x%x", __func__, data);
	s2mu003_clr_bits(i2c, 0x47, 1<<3); /*Disable Manual Reset*/

	ret = s2mu003_init_irq(chip);

	if (ret < 0) {
		dev_err(chip->dev,
				"Error : can't initialize S2MU003 MFD irq\n");
		goto err_init_irq;
	}

#ifdef CONFIG_REGULATOR_S2MU003
	ret = mfd_add_devices(chip->dev, 0, &s2mu003_regulator_devs[0],
			ARRAY_SIZE(s2mu003_regulator_devs),
			NULL, chip->irq_base, NULL);
	if (ret < 0) {
		dev_err(chip->dev,
				"Error : can't add regulator\n");
		goto err_add_regulator_devs;
	}
#endif /*CONFIG_REGULATOR_S2MU003*/

#ifdef CONFIG_LEDS_S2MU003
	ret = mfd_add_devices(chip->dev, 0, &s2mu003_fled_devs[0],
			ARRAY_SIZE(s2mu003_fled_devs),
			NULL, chip->irq_base, NULL);
	if (ret < 0) {
		dev_err(chip->dev, "Failed : add FlashLED devices");
		goto err_add_fled_devs;
	}
#endif /*CONFIG_LEDS_S2MU003*/


#ifdef CONFIG_CHARGER_S2MU003
	ret = mfd_add_devices(chip->dev, 0, &s2mu003_charger_devs[0],
			ARRAY_SIZE(s2mu003_charger_devs),
			NULL, chip->irq_base, NULL);
	if (ret < 0) {
		dev_err(chip->dev, "Failed : add charger devices\n");
		goto err_add_chg_devs;
	}
#endif /*CONFIG_CHARGER_S2MU003*/

	device_init_wakeup(chip->dev, 1);
	enable_irq_wake(chip->irq);

	pr_info("%s : S2MU003 MFD Driver Fin probe\n", __func__);
	return ret;

#ifdef CONFIG_CHARGER_S2MU003
err_add_chg_devs:
#endif /*CONFIG_CHARGER_S2MU003*/

#ifdef CONFIG_LEDS_S2MU003
err_add_fled_devs:
#endif /*CONFIG_LEDS_S2MU003*/
	mfd_remove_devices(chip->dev);
#ifdef CONFIG_REGULATOR_S2MU003
err_add_regulator_devs:
#endif /*CONFIG_REGULATOR_S2MU003*/
err_init_irq:
	wake_lock_destroy(&(chip->irq_wake_lock));
	mutex_destroy(&chip->io_lock);
irq_base_err:
err_i2cfunc_not_support:
	kfree(chip);
err_mfd_nomem:
	if (pdata)
		kfree(pdata);
err_parse_dt:
err_dt_nomem:
	return ret;
}