static int init_local_irq_data(void) { int irq; spin_lock(&local_irqs_type_lock); for (irq = 0; irq < NR_LOCAL_IRQS; irq++) { struct irq_desc *desc = irq_to_desc(irq); init_one_irq_desc(desc); desc->irq = irq; desc->action = NULL; /* PPIs are included in local_irqs, we copy the IRQ type from * local_irqs_type when bringing up local IRQ for this CPU in * order to pick up any configuration done before this CPU came * up. For interrupts configured after this point this is done in * irq_set_type. */ desc->arch.type = local_irqs_type[irq]; } spin_unlock(&local_irqs_type_lock); return 0; }
/*
 * Initialise the irq_desc for every global (non per-CPU) IRQ, i.e. all
 * IRQs from NR_LOCAL_IRQS up to NR_IRQS.  Boot-time only (__init).
 */
static int __init init_irq_data(void)
{
	int i;

	for (i = NR_LOCAL_IRQS; i < NR_IRQS; i++) {
		struct irq_desc *desc = irq_to_desc(i);

		init_one_irq_desc(desc);
		desc->irq = i;
		desc->action = NULL;
	}

	return 0;
}
/*
 * Look up the irq_desc for @irq, allocating and initialising one on
 * @node if none exists yet.  Returns NULL only for an out-of-range irq;
 * an allocation failure is fatal.  Uses double-checked locking on
 * sparse_irq_lock so the common already-allocated case stays lock-free.
 */
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	unsigned long flags;
	struct irq_desc *desc;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	/* Fast path: descriptor already installed, no locking needed. */
	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* Re-check under the lock: another CPU may have raced with us. */
	desc = irq_desc_ptrs[irq];
	if (!desc) {
		/* Fall back to bootmem while the slab is not yet up. */
		if (slab_is_available())
			desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
		else
			desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));

		printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);

		if (!desc) {
			printk(KERN_ERR "can not alloc irq_desc\n");
			BUG_ON(1);
		}

		init_one_irq_desc(irq, desc, node);
		irq_desc_ptrs[irq] = desc;
	}

	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) { struct irq_desc *desc; unsigned long flags; int node; if (irq >= nr_irqs) { WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", irq, nr_irqs); return NULL; } desc = irq_desc_ptrs[irq]; if (desc) return desc; spin_lock_irqsave(&sparse_irq_lock, flags); /* We have to check it to avoid races with another CPU */ desc = irq_desc_ptrs[irq]; if (desc) goto out_unlock; node = cpu_to_node(cpu); desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n", irq, cpu, node); if (!desc) { printk(KERN_ERR "can not alloc irq_desc\n"); BUG_ON(1); } init_one_irq_desc(irq, desc, cpu); irq_desc_ptrs[irq] = desc; out_unlock: spin_unlock_irqrestore(&sparse_irq_lock, flags); return desc; }
/*
 * Look up the irq_desc for @irq, allocating and initialising one on
 * @node if it does not exist yet.  Returns NULL only for an
 * out-of-range irq; an allocation failure is fatal.  Double-checked
 * locking on sparse_irq_lock (raw, as this may run with IRQs off) keeps
 * the common already-allocated case lock-free.
 */
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	unsigned long flags;
	struct irq_desc *desc;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	/* Fast path: descriptor already present, no locking needed. */
	desc = irq_to_desc(irq);
	if (desc)
		return desc;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	/* Re-check under the lock: another CPU may have installed it. */
	desc = irq_to_desc(irq);
	if (!desc) {
		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
		printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);

		if (!desc) {
			printk(KERN_ERR "can not alloc irq_desc\n");
			BUG_ON(1);
		}

		init_one_irq_desc(irq, desc, node);
		set_irq_desc(irq, desc);
	}

	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}