Example #1
static int __init nps400_of_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *nps400_root_domain;

	if (parent) {
		pr_err("DeviceTree incore ic not a root irq controller\n");
		return -EINVAL;
	}

	nps400_root_domain = irq_domain_add_linear(node, NPS_NR_CPU_IRQS,
						   &nps400_irq_ops, NULL);

	if (!nps400_root_domain) {
		pr_err("nps400 root irq domain not avail\n");
		return -ENOMEM;
	}

	/*
	 * Needed for primary domain lookup to succeed
	 * This is a primary irqchip, and can never have a parent
	 */
	irq_set_default_host(nps400_root_domain);

#ifdef CONFIG_SMP
	irq_create_mapping(nps400_root_domain, NPS_IPI_IRQ);
#endif

	return 0;
}
Example #2
static int __init
init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *root_domain;
	struct bcr_irq_arcv2 irq_bcr;
	unsigned int nr_cpu_irqs;

	READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
	nr_cpu_irqs = irq_bcr.irqs + NR_EXCEPTIONS;

	if (parent)
		panic("DeviceTree incore intc not a root irq controller\n");

	root_domain = irq_domain_add_linear(intc, nr_cpu_irqs, &arcv2_irq_ops, NULL);
	if (!root_domain)
		panic("root irq domain not avail\n");

	/*
	 * Needed for primary domain lookup to succeed
	 * This is a primary irqchip, and can never have a parent
	 */
	irq_set_default_host(root_domain);

#ifdef CONFIG_SMP
	irq_create_mapping(root_domain, IPI_IRQ);
#endif
	irq_create_mapping(root_domain, SOFTIRQ_IRQ);

	return 0;
}
Example #3
static int __init
init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *root_domain;

	if (parent)
		panic("DeviceTree incore intc not a root irq controller\n");

	root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS, &arcv2_irq_ops, NULL);
	if (!root_domain)
		panic("root irq domain not avail\n");

	/*
	 * Needed for primary domain lookup to succeed
	 * This is a primary irqchip, and can never have a parent
	 */
	irq_set_default_host(root_domain);

#ifdef CONFIG_SMP
	irq_create_mapping(root_domain, IPI_IRQ);
#endif
	irq_create_mapping(root_domain, SOFTIRQ_IRQ);

	return 0;
}
Example #4
static void __init xics_init_host(void)
{
	xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
				   XICS_IRQ_SPURIOUS);
	BUG_ON(xics_host == NULL);
	irq_set_default_host(xics_host);
}
Example #5
/*
 * This is called by init_IRQ.  set in ppc_md.init_IRQ by iSeries_setup.c
 * It must be called before the bus walk.
 */
void __init iSeries_init_IRQ(void)
{
	/* Register PCI event handler and open an event path */
	struct irq_host *host;
	int ret;

	/*
	 * The Hypervisor only allows us up to 256 interrupt
	 * sources (the irq number is passed in a u8).
	 */
	irq_set_virq_count(256);

	/* Create irq host. No need for a revmap since HV will give us
	 * back our virtual irq number
	 */
	host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &iseries_irq_host_ops, 0);
	BUG_ON(host == NULL);
	irq_set_default_host(host);

	ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
			&pci_event_handler);
	if (ret == 0) {
		ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
		if (ret != 0)
			printk(KERN_ERR "iseries_init_IRQ: open event path "
					"failed with rc 0x%x\n", ret);
	} else
		printk(KERN_ERR "iseries_init_IRQ: register handler "
				"failed with rc 0x%x\n", ret);
}
Example #6
void __init init_IRQ(void)
{
	struct device_node *np;

	/* Mask all priority IRQs */
	and_creg(IER, ~0xfff0);

	np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic");
	if (np != NULL) {
		/* create the core host */
		core_domain = irq_domain_add_linear(np, NR_PRIORITY_IRQS,
						    &core_domain_ops, NULL);
		if (core_domain)
			irq_set_default_host(core_domain);
		of_node_put(np);
	}

	printk(KERN_INFO "Core interrupt controller initialized\n");

	/* now we're ready for other SoC controllers */
	megamod_pic_init();

	/* Clear all general IRQ flags */
	set_creg(ICR, 0xfff0);
}
Example #7
void __init amigaone_init_IRQ(void)
{
	struct device_node *pic, *np = NULL;
	const unsigned long *prop = NULL;
	unsigned long int_ack = 0;

	/* Search for ISA interrupt controller. */
	pic = of_find_compatible_node(NULL, "interrupt-controller",
	                              "pnpPNP,000");
	BUG_ON(pic == NULL);

	/* Look for interrupt acknowledge address in the PCI root node. */
	np = of_find_compatible_node(NULL, "pci", "mai-logic,articia-s");
	if (np) {
		prop = of_get_property(np, "8259-interrupt-acknowledge", NULL);
		if (prop)
			int_ack = prop[0];
		of_node_put(np);
	}

	if (int_ack == 0)
		printk(KERN_WARNING "Cannot find PCI interrupt acknowledge"
		       " address, polling\n");

	i8259_init(pic, int_ack);
	ppc_md.get_irq = i8259_irq;
	irq_set_default_host(i8259_get_host());
}
Example #8
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
				&xtensa_irq_domain_ops, &xtensa_irq_chip);
	irq_set_default_host(root_domain);
	return 0;
}
Example #9
/**
 * mpc52xx_init_irq - Initialize and register with the virq subsystem
 *
 * Hook for setting up IRQs on an mpc5200 system.  A pointer to this function
 * is to be put into the machine definition structure.
 *
 * This function searches the device tree for an MPC5200 interrupt controller,
 * initializes it, and registers it with the virq subsystem.
 */
void __init mpc52xx_init_irq(void)
{
    u32 intr_ctrl;
    struct device_node *picnode;
    struct device_node *np;

    /* Remap the necessary zones */
    picnode = of_find_matching_node(NULL, mpc52xx_pic_ids);
    intr = of_iomap(picnode, 0);
    if (!intr)
        panic(__FILE__	": find_and_map failed on 'mpc5200-pic'. "
              "Check node !");

    np = of_find_matching_node(NULL, mpc52xx_sdma_ids);
    sdma = of_iomap(np, 0);
    of_node_put(np);
    if (!sdma)
        panic(__FILE__	": find_and_map failed on 'mpc5200-bestcomm'. "
              "Check node !");

    pr_debug("MPC5200 IRQ controller mapped to 0x%p\n", intr);

    /* Disable all interrupt sources. */
    out_be32(&sdma->IntPend, 0xffffffff);	/* 1 means clear pending */
    out_be32(&sdma->IntMask, 0xffffffff);	/* 1 means disabled */
    out_be32(&intr->per_mask, 0x7ffffc00);	/* 1 means disabled */
    out_be32(&intr->main_mask, 0x00010fff);	/* 1 means disabled */
    intr_ctrl = in_be32(&intr->ctrl);
    intr_ctrl &= 0x00ff0000;	/* Keeps IRQ[0-3] config */
    intr_ctrl |=	0x0f000000 |	/* clear IRQ 0-3 */
                    0x00001000 |	/* MEE master external enable */
                    0x00000000 |	/* 0 means disable IRQ 0-3 */
                    0x00000001;	/* CEb route critical normally */
    out_be32(&intr->ctrl, intr_ctrl);

    /* Zero a bunch of the priority settings. */
    out_be32(&intr->per_pri1, 0);
    out_be32(&intr->per_pri2, 0);
    out_be32(&intr->per_pri3, 0);
    out_be32(&intr->main_pri1, 0);
    out_be32(&intr->main_pri2, 0);

    /*
     * As last step, add an irq host to translate the real
     * hw irq information provided by the ofw to linux virq
     */
    mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR,
                                     MPC52xx_IRQ_HIGHTESTHWIRQ,
                                     &mpc52xx_irqhost_ops, -1);

    if (!mpc52xx_irqhost)
        panic(__FILE__ ": Cannot allocate the IRQ host\n");

    irq_set_default_host(mpc52xx_irqhost);

    pr_info("MPC52xx PIC is up and running!\n");
}
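
Note: the kernel-doc above Example #9's function says a pointer to mpc52xx_init_irq is placed in the machine definition structure. Below is a minimal board-file sketch of that wiring, assuming a hypothetical board: the machine name, compatible string and probe hook are placeholders, only mpc52xx_init_irq and mpc52xx_get_irq come from the mpc5200 support code.

#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/mpc52xx.h>

/* Hypothetical board probe; "example,my5200" is a placeholder compatible. */
static int __init my5200_probe(void)
{
	return of_machine_is_compatible("example,my5200");
}

/* Hook the PIC setup and IRQ dispatch into the machine definition. */
define_machine(my5200) {
	.name		= "my5200 (sketch)",
	.probe		= my5200_probe,
	.init_IRQ	= mpc52xx_init_irq,
	.get_irq	= mpc52xx_get_irq,
};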
Example #10
static int __init xtensa_pic_init(struct device_node *np,
		struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops,
				&xtensa_irq_chip);
	irq_set_default_host(root_domain);
	return 0;
}
Example #11
void __init init_IRQ(void)
{
	u32 nr_irq, intr_mask;
	struct device_node *intc = NULL;
#ifdef CONFIG_SELFMOD_INTC
	unsigned int intc_baseaddr = 0;
	static int arr_func[] = {
				(int)&get_irq,
				(int)&intc_enable_or_unmask,
				(int)&intc_disable_or_mask,
				(int)&intc_mask_ack,
				(int)&intc_ack,
				(int)&intc_end,
				0
			};
#endif
	intc = of_find_compatible_node(NULL, NULL, "xlnx,xps-intc-1.00.a");
	BUG_ON(!intc);

	intc_baseaddr = be32_to_cpup(of_get_property(intc, "reg", NULL));
	intc_baseaddr = (unsigned long) ioremap(intc_baseaddr, PAGE_SIZE);
	nr_irq = be32_to_cpup(of_get_property(intc,
						"xlnx,num-intr-inputs", NULL));

	intr_mask =
		be32_to_cpup(of_get_property(intc, "xlnx,kind-of-intr", NULL));
	if (intr_mask > (u32)((1ULL << nr_irq) - 1))
		pr_info(" ERROR: Mismatch in kind-of-intr param\n");

#ifdef CONFIG_SELFMOD_INTC
	selfmod_function((int *) arr_func, intc_baseaddr);
#endif
	pr_info("%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
		intc->name, intc_baseaddr, nr_irq, intr_mask);

	/*
	 * Disable all external interrupts until they are
	 * explicitly requested.
	 */
	out_be32(intc_baseaddr + IER, 0);

	/* Acknowledge any pending interrupts just in case. */
	out_be32(intc_baseaddr + IAR, 0xffffffff);

	/* Turn on the Master Enable. */
	out_be32(intc_baseaddr + MER, MER_HIE | MER_ME);

	/* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm
	 * lazy and Michal can clean it up to something nicer when he tests
	 * and commits this patch.  ~~gcl */
	root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
							(void *)intr_mask);

	irq_set_default_host(root_domain);
}
Example #12
void __init mtvic_init(int def)
{
	static unsigned virqs;

	virq_host = irq_domain_add_linear(NULL, 32, &mtvic_ops, NULL);

	virq_host->host_data = &virqs;

	if (def)
		irq_set_default_host(virq_host);
}
Example #13
static void __init xics_init_host(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		xics_irq_chip = &xics_pic_lpar;
	else
		xics_irq_chip = &xics_pic_direct;

	xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops,
				   XICS_IRQ_SPURIOUS);
	BUG_ON(xics_host == NULL);
	irq_set_default_host(xics_host);
}
Example #14
void __init mpc52xx_init_irq(void)
{
	u32 intr_ctrl;
	struct device_node *picnode;
	struct device_node *np;

	/* Remap the necessary zones */
	picnode = of_find_matching_node(NULL, mpc52xx_pic_ids);
	intr = of_iomap(picnode, 0);
	if (!intr)
		panic(__FILE__	": find_and_map failed on 'mpc5200-pic'. "
				"Check node !");

	np = of_find_matching_node(NULL, mpc52xx_sdma_ids);
	sdma = of_iomap(np, 0);
	of_node_put(np);
	if (!sdma)
		panic(__FILE__	": find_and_map failed on 'mpc5200-bestcomm'. "
				"Check node !");

	pr_debug("MPC5200 IRQ controller mapped to 0x%p\n", intr);

	/* Disable all interrupt sources. */
	out_be32(&sdma->IntPend, 0xffffffff);	/* 1 means clear pending */
	out_be32(&sdma->IntMask, 0xffffffff);	/* 1 means disabled */
	out_be32(&intr->per_mask, 0x7ffffc00);	/* 1 means disabled */
	out_be32(&intr->main_mask, 0x00010fff);	/* 1 means disabled */
	intr_ctrl = in_be32(&intr->ctrl);
	intr_ctrl &= 0x00ff0000;	/* Keeps IRQ[0-3] config */
	intr_ctrl |=	0x0f000000 |	/* clear IRQ 0-3 */
			0x00001000 |	/* MEE master external enable */
			0x00000000 |	/* 0 means disable IRQ 0-3 */
			0x00000001;	/* CEb route critical normally */
	out_be32(&intr->ctrl, intr_ctrl);

	/* Zero a bunch of the priority settings. */
	out_be32(&intr->per_pri1, 0);
	out_be32(&intr->per_pri2, 0);
	out_be32(&intr->per_pri3, 0);
	out_be32(&intr->main_pri1, 0);
	out_be32(&intr->main_pri2, 0);

	/*
	 * As last step, add an irq host to translate the real
	 * hw irq information provided by the ofw to linux virq
	 */
	mpc52xx_irqhost = irq_domain_add_linear(picnode,
	                                 MPC52xx_IRQ_HIGHTESTHWIRQ,
	                                 &mpc52xx_irqhost_ops, NULL);

	if (!mpc52xx_irqhost)
		panic(__FILE__ ": Cannot allocate the IRQ host\n");

	irq_set_default_host(mpc52xx_irqhost);

	pr_info("MPC52xx PIC is up and running!\n");
}
Example #15
void __init mmp_dt_irq_init(void)
{
	struct device_node *node;
	const struct of_device_id *of_id;
	struct mmp_intc_conf *conf;
	int nr_irqs, irq_base, ret, irq;

	node = of_find_matching_node(NULL, intc_ids);
	if (!node) {
		pr_err("Failed to find interrupt controller in arch-mmp\n");
		return;
	}
	of_id = of_match_node(intc_ids, node);
	conf = of_id->data;

	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
	if (ret) {
		pr_err("Not found mrvl,intc-nr-irqs property\n");
		return;
	}

	mmp_icu_base = of_iomap(node, 0);
	if (!mmp_icu_base) {
		pr_err("Failed to get interrupt controller register\n");
		return;
	}

	irq_base = irq_alloc_descs(-1, 0, nr_irqs - NR_IRQS_LEGACY, 0);
	if (irq_base < 0) {
		pr_err("Failed to allocate IRQ numbers\n");
		goto err;
	} else if (irq_base != NR_IRQS_LEGACY) {
		pr_err("ICU's irqbase should be started from 0\n");
		goto err;
	}
	icu_data[0].conf_enable = conf->conf_enable;
	icu_data[0].conf_disable = conf->conf_disable;
	icu_data[0].conf_mask = conf->conf_mask;
	icu_data[0].nr_irqs = nr_irqs;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(node, nr_irqs, 0, 0,
						   &mmp_irq_domain_ops,
						   &icu_data[0]);
	irq_set_default_host(icu_data[0].domain);
	for (irq = 0; irq < nr_irqs; irq++)
		icu_mask_irq(irq_get_irq_data(irq));
	mmp2_mux_init(node);
	return;
err:
	iounmap(mmp_icu_base);
}
Example #16
void __init init_IRQ(void)
{
	struct irq_domain *domain;
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "ALTR,nios2-1.0");
	BUG_ON(!node);

	domain = irq_domain_add_linear(node, NIOS2_CPU_NR_IRQS, &irq_ops, NULL);
	BUG_ON(!domain);

	irq_set_default_host(domain);
	of_node_put(node);
}
Example #17
void __init flipper_pic_probe(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "nintendo,flipper-pic");
	BUG_ON(!np);

	flipper_irq_host = flipper_pic_init(np);
	BUG_ON(!flipper_irq_host);

	irq_set_default_host(flipper_irq_host);

	of_node_put(np);
}
Example #18
static int __init h8300h_intc_of_init(struct device_node *intc,
				      struct device_node *parent)
{
	struct irq_domain *domain;

	intc_baseaddr = of_iomap(intc, 0);
	BUG_ON(!intc_baseaddr);

	/* All interrupt priority low */
	ctrl_outb(0x00, IPR + 0);
	ctrl_outb(0x00, IPR + 1);

	domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL);
	BUG_ON(!domain);
	irq_set_default_host(domain);
	return 0;
}
Example #19
void __init ehv_pic_init(void)
{
	struct device_node *np, *np2;
	struct ehv_pic *ehv_pic;
	int coreint_flag = 1;

	np = of_find_compatible_node(NULL, NULL, "epapr,hv-pic");
	if (!np) {
		pr_err("ehv_pic_init: could not find epapr,hv-pic node\n");
		return;
	}

	if (!of_find_property(np, "has-external-proxy", NULL))
		coreint_flag = 0;

	ehv_pic = kzalloc(sizeof(struct ehv_pic), GFP_KERNEL);
	if (!ehv_pic) {
		of_node_put(np);
		return;
	}

	ehv_pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
		NR_EHV_PIC_INTS, &ehv_pic_host_ops, 0);

	if (!ehv_pic->irqhost) {
		of_node_put(np);
		return;
	}

	np2 = of_find_compatible_node(NULL, NULL, "fsl,hv-mpic-per-cpu");
	if (np2) {
		mpic_percpu_base_vaddr = of_iomap(np2, 0);
		if (!mpic_percpu_base_vaddr)
			pr_err("ehv_pic_init: of_iomap failed\n");

		of_node_put(np2);
	}

	ehv_pic->irqhost->host_data = ehv_pic;
	ehv_pic->hc_irq = ehv_pic_irq_chip;
	ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
	ehv_pic->coreint_flag = coreint_flag;

	global_ehv_pic = ehv_pic;
	irq_set_default_host(global_ehv_pic->irqhost);
}
Example #20
int __init at91_aic_of_init(struct device_node *node,
				     struct device_node *parent)
{
	at91_aic_base = of_iomap(node, 0);
	at91_aic_np = node;

	at91_aic_domain = irq_domain_add_linear(at91_aic_np, NR_AIC_IRQS,
						&at91_aic_irq_ops, NULL);
	if (!at91_aic_domain)
		panic("Unable to add AIC irq domain (DT)\n");

	irq_set_default_host(at91_aic_domain);

	at91_aic_hw_init(NR_AIC_IRQS);

	return 0;
}
Example #21
static int __init
init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
{
	if (parent)
		panic("DeviceTree incore intc not a root irq controller\n");

	root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0,
					    &arcv2_irq_ops, NULL);

	if (!root_domain)
		panic("root irq domain not avail\n");

	/* with this we don't need to export root_domain */
	irq_set_default_host(root_domain);

	return 0;
}
Example #22
void __init init_IRQ(void)
{
	struct irq_domain *domain;
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "altr,nios2-1.0");
	if (!node)
		node = of_find_compatible_node(NULL, NULL, "altr,nios2-1.1");

	BUG_ON(!node);

	domain = irq_domain_add_linear(node, NIOS2_CPU_NR_IRQS, &irq_ops, NULL);
	BUG_ON(!domain);

	irq_set_default_host(domain);
	of_node_put(node);
	/* Load the initial ienable value */
	ienable = RDCTL(CTL_IENABLE);
}
Example #23
void __init xilinx_intc_init_tree(void)
{
	struct device_node *np;

	/* Find the top-level interrupt controller */
	for_each_matching_node(np, xilinx_intc_match) {
		if (!of_get_property(np, "interrupts", NULL))
			break;
	}
	BUG_ON(!np);

	master_irqhost = xilinx_intc_init(np);
	BUG_ON(!master_irqhost);

	irq_set_default_host(master_irqhost);
	of_node_put(np);

	xilinx_i8259_setup_cascade();
}
Example #24
int __init vt8500_irq_init(struct device_node *node, struct device_node *parent)
{
    struct irq_domain *vt8500_irq_domain;
    struct vt8500_irq_priv *priv;
    int irq, i;
    struct device_node *np = node;

    priv = kzalloc(sizeof(struct vt8500_irq_priv), GFP_KERNEL);
    priv->base = of_iomap(np, 0);

    vt8500_irq_domain = irq_domain_add_legacy(node, 64, irq_cnt, 0,
                        &vt8500_irq_domain_ops, priv);
    if (!vt8500_irq_domain)
        pr_err("%s: Unable to add wmt irq domain!\n", __func__);

    irq_set_default_host(vt8500_irq_domain);

    vt8500_init_irq_hw(priv->base);

    pr_info("Added IRQ Controller @ %x [virq_base = %d]\n",
            (u32)(priv->base), irq_cnt);

    /* check if this is a slaved controller */
    if (of_irq_count(np) != 0) {
        /* check that we have the correct number of interrupts */
        if (of_irq_count(np) != 8) {
            pr_err("%s: Incorrect IRQ map for slave controller\n",
                   __func__);
            return -EINVAL;
        }

        for (i = 0; i < 8; i++) {
            irq = irq_of_parse_and_map(np, i);
            enable_irq(irq);
        }

        pr_info("vt8500-irq: Enabled slave->parent interrupts\n");
    }

    irq_cnt += 64;

    return 0;
}
Example #25
/*
 * Initialize the AIC interrupt controller.
 */
void __init at91_aic_init(unsigned int priority[NR_AIC_IRQS])
{
	unsigned int i;
	int irq_base;

	at91_aic_base = ioremap(AT91_AIC, 512);
	if (!at91_aic_base)
		panic("Unable to ioremap AIC registers\n");

	/* Add irq domain for AIC */
	irq_base = irq_alloc_descs(-1, 0, NR_AIC_IRQS, 0);
	if (irq_base < 0) {
		WARN(1, "Cannot allocate irq_descs, assuming pre-allocated\n");
		irq_base = 0;
	}
	at91_aic_domain = irq_domain_add_legacy(at91_aic_np, NR_AIC_IRQS,
						irq_base, 0,
						&irq_domain_simple_ops, NULL);

	if (!at91_aic_domain)
		panic("Unable to add AIC irq domain\n");

	irq_set_default_host(at91_aic_domain);

	/*
	 * The IVR is used by macro get_irqnr_and_base to read and verify.
	 * The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
	 */
	for (i = 0; i < NR_AIC_IRQS; i++) {
		/* Put hardware irq number in Source Vector Register: */
		at91_aic_write(AT91_AIC_SVR(i), i);
		/* Active Low interrupt, with the specified priority */
		at91_aic_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);

		irq_set_chip_and_handler(i, &at91_aic_chip, handle_level_irq);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	at91_aic_hw_init(NR_AIC_IRQS);
}
Example #26
/* MMP (ARMv5) */
void __init icu_init_irq(void)
{
	int irq;

	max_icu_nr = 1;
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp_conf.conf_enable;
	icu_data[0].conf_disable = mmp_conf.conf_disable;
	icu_data[0].conf_mask = mmp_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
	for (irq = 0; irq < 64; irq++) {
		icu_mask_irq(irq_get_irq_data(irq));
		irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
	irq_set_default_host(icu_data[0].domain);
#ifdef CONFIG_CPU_PXA910
	icu_irq_chip.irq_set_wake = pxa910_set_wake;
#endif
}
Example #27
void __init
pmac_pfm_init(void)
{
//    int i;
    struct device_node *irqctrler;
    const u32 *reg;
    unsigned long addr, size;
    unsigned char x;
    unsigned int flags = 0;

	irqctrler = of_find_node_by_name(NULL, "performa-intr");
	if (irqctrler) {
                reg = of_get_property(irqctrler, "reg", NULL);
                addr = reg[0];
                size = reg[1];
                pfm_irq.icr1 = (u8 __iomem *)ioremap(addr, size);
                addr = reg[2];
                size = reg[3];
                pfm_irq.icr2 = (u8 __iomem *)ioremap(addr, size);
                addr = reg[4];
                size = reg[5];
                pfm_irq.via1_flag = (u8 __iomem *)ioremap(addr, size);
                addr = reg[6];
                size = reg[7];
                pfm_irq.via1_ena = (u8 __iomem *)ioremap(addr, size);
                addr = reg[8];
                size = reg[9];
                pfm_irq.via1_pcr = (u8 __iomem *)ioremap(addr, size);
                addr = reg[10];
                size = reg[11];
                pfm_irq.slt1_flag = (u8 __iomem *)ioremap(addr, size);
                addr = reg[12];
                size = reg[13];
                pfm_irq.via2_flag = (u8 __iomem *)ioremap(addr, size);
                addr = reg[14];
                size = reg[15];
                pfm_irq.via2_ena = (u8 __iomem *)ioremap(addr, size);
                addr = reg[16];
                size = reg[17];
                pfm_irq.slt2_flag = (u8 __iomem *)ioremap(addr, size);
                addr = reg[18];
                size = reg[19];
                pfm_irq.f108_flag = (u8 __iomem *)ioremap(addr, size);
                addr &= ~3;
                pfm_irq.f108_ack = (u16 __iomem *)ioremap(addr, size);
	} else {
		if (ppc_md.progress)
			ppc_md.progress("Failed to obtain Performa irq addresses/interrupts\n", 0);
		panic("Failed to obtain Performa irq addresses/interrupts\n");
	}

	/*
	 * SCC channel A control reg and ESP SCSI status reg.  We ask device
	 * if it caused the irq.  There must be a better way..
	 */
	serial = (u8 __iomem *)ioremap(0x50f0c002,0x10);
	scsi = (u8 __iomem *)ioremap(0x50f10040, 0x10);

	/* disable all interrupts */
	out_be32((u32 __iomem *)pfm_irq.icr1, 1);
	x = in_8(pfm_irq.icr1);
	out_8(pfm_irq.icr1, 1);
	x = in_8(pfm_irq.icr1);
	out_8(pfm_irq.icr2, 7);
	x = in_8(pfm_irq.icr2);

	out_8(pfm_irq.via1_pcr, 0);
	out_8(pfm_irq.via1_ena, 0x7f);
	out_8(pfm_irq.via1_flag, 0x7f);
	out_8(pfm_irq.via2_flag, 0x7f);

	out_8(pfm_irq.f108_flag, 3);
	out_8(pfm_irq.f108_flag, 0xfc);

	f108_ena = via1_ena = via2_ena = slt1_ena = slt2_ena = 0;

	/* register root irq handler */
	//irq_desc[max_irqs].handler = &pfm_root;
	//request_irq(max_irqs, pfm_root_action, SA_INTERRUPT, "Performa", 0);
/*
	for (i = 0; i < max_irqs ; i++ )
		irq_desc[i].handler = &pmac_pfm;

	if( setup_irq(max_irqs-1, &pfm_root_action) != 0 )
	{
	    if (ppc_md.progress)
	        ppc_md.progress( "pmac_pfm_init(): Failed register root handler!\n",
                             0 );
	    printk( KERN_ERR "%s(): Failed register root handler!\n", __func__ );
	}
*/
        /* We configure the OF parsing based on our
         * platform type and whether we were booted by BootX.
         */
        flags |= OF_IMAP_OLDWORLD_MAC;
        if (of_get_property(of_chosen, "linux,bootx", NULL) != NULL)
                flags |= OF_IMAP_NO_PHANDLE;

        of_irq_map_init(flags);

        spin_lock_init(&pfm_lock);

        ppc_md.get_irq = pfm_get_irq;

        pmac_pfm_host = irq_alloc_host(irqctrler, IRQ_HOST_MAP_LINEAR, max_irqs,
                                       &pmac_pfm_host_ops,
                                       max_irqs);
        BUG_ON(pmac_pfm_host == NULL);
        irq_set_default_host(pmac_pfm_host);

        of_node_put(irqctrler);

	printk("System has %d possible interrupts\n", max_irqs);

//#ifdef CONFIG_XMON
//	/* fake irq line - 5 */
//	request_irq(5, xmon_irq, 0, "NMI - XMON", 0);
//	icr_ena |= 0x20;
//#endif	/* CONFIG_XMON */
}
Example #28
/* MMP2 (ARMv7) */
void __init mmp2_init_icu(void)
{
	int irq;

	max_icu_nr = 8;
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp2_conf.conf_enable;
	icu_data[0].conf_disable = mmp2_conf.conf_disable;
	icu_data[0].conf_mask = mmp2_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
	icu_data[1].reg_status = mmp_icu_base + 0x150;
	icu_data[1].reg_mask = mmp_icu_base + 0x168;
	icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE;
	icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE;
	icu_data[1].nr_irqs = 2;
	icu_data[1].cascade_irq = 4;
	icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE;
	icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
						   icu_data[1].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[1]);
	icu_data[2].reg_status = mmp_icu_base + 0x154;
	icu_data[2].reg_mask = mmp_icu_base + 0x16c;
	icu_data[2].nr_irqs = 2;
	icu_data[2].cascade_irq = 5;
	icu_data[2].virq_base = IRQ_MMP2_RTC_BASE;
	icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
						   icu_data[2].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[2]);
	icu_data[3].reg_status = mmp_icu_base + 0x180;
	icu_data[3].reg_mask = mmp_icu_base + 0x17c;
	icu_data[3].nr_irqs = 3;
	icu_data[3].cascade_irq = 9;
	icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE;
	icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
						   icu_data[3].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[3]);
	icu_data[4].reg_status = mmp_icu_base + 0x158;
	icu_data[4].reg_mask = mmp_icu_base + 0x170;
	icu_data[4].nr_irqs = 5;
	icu_data[4].cascade_irq = 17;
	icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE;
	icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
						   icu_data[4].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[4]);
	icu_data[5].reg_status = mmp_icu_base + 0x15c;
	icu_data[5].reg_mask = mmp_icu_base + 0x174;
	icu_data[5].nr_irqs = 15;
	icu_data[5].cascade_irq = 35;
	icu_data[5].virq_base = IRQ_MMP2_MISC_BASE;
	icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
						   icu_data[5].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[5]);
	icu_data[6].reg_status = mmp_icu_base + 0x160;
	icu_data[6].reg_mask = mmp_icu_base + 0x178;
	icu_data[6].nr_irqs = 2;
	icu_data[6].cascade_irq = 51;
	icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE;
	icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
						   icu_data[6].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[6]);
	icu_data[7].reg_status = mmp_icu_base + 0x188;
	icu_data[7].reg_mask = mmp_icu_base + 0x184;
	icu_data[7].nr_irqs = 2;
	icu_data[7].cascade_irq = 55;
	icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE;
	icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
						   icu_data[7].virq_base, 0,
						   &irq_domain_simple_ops,
						   &icu_data[7]);
	for (irq = 0; irq < IRQ_MMP2_MUX_END; irq++) {
		icu_mask_irq(irq_get_irq_data(irq));
		switch (irq) {
		case IRQ_MMP2_PMIC_MUX:
		case IRQ_MMP2_RTC_MUX:
		case IRQ_MMP2_KEYPAD_MUX:
		case IRQ_MMP2_TWSI_MUX:
		case IRQ_MMP2_MISC_MUX:
		case IRQ_MMP2_MIPI_HSI1_MUX:
		case IRQ_MMP2_MIPI_HSI0_MUX:
			irq_set_chip(irq, &icu_irq_chip);
			irq_set_chained_handler(irq, icu_mux_irq_demux);
			break;
		default:
			irq_set_chip_and_handler(irq, &icu_irq_chip,
						 handle_level_irq);
			break;
		}
		set_irq_flags(irq, IRQF_VALID);
	}
	irq_set_default_host(icu_data[0].domain);
#ifdef CONFIG_CPU_MMP2
	icu_irq_chip.irq_set_wake = mmp2_set_wake;
#endif
}
Example #29
static int __init imx_gpcv2_irqchip_init(struct device_node *node,
			       struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	struct gpcv2_irqchip_data *cd;
	int i;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to get parent domain\n", node->full_name);
		return -ENXIO;
	}

	cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
	if (!cd) {
		pr_err("kzalloc failed!\n");
		return -ENOMEM;
	}

	cd->gpc_base = of_iomap(node, 0);
	if (!cd->gpc_base) {
		pr_err("fsl-gpcv2: unable to map gpc registers\n");
		kfree(cd);
		return -ENOMEM;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
				node, &gpcv2_irqchip_data_domain_ops, cd);
	if (!domain) {
		iounmap(cd->gpc_base);
		kfree(cd);
		return -ENOMEM;
	}
	irq_set_default_host(domain);

	/* Initially mask all interrupts */
	for (i = 0; i < IMR_NUM; i++) {
		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE0 + i * 4);
		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE1 + i * 4);
		cd->wakeup_sources[i] = ~0;
	}

	/* Let CORE0 as the default CPU to wake up by GPC */
	cd->cpu2wakeup = GPC_IMR1_CORE0;

	/*
	 * Due to hardware design failure, need to make sure GPR
	 * interrupt(#32) is unmasked during RUN mode to avoid entering
	 * DSM by mistake.
	 */
	writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);

	imx_gpcv2_instance = cd;
	register_syscore_ops(&imx_gpcv2_syscore_ops);

	return 0;
}
Example #30
static void __init xics_init_host(void)
{
	xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL);
	BUG_ON(xics_host == NULL);
	irq_set_default_host(xics_host);
}