static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
			 uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Descend the rbtree, keyed by irq, to find the insertion point: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* An entry for this irq already exists; update it in place. */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the new node and rebalance the tree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);

	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}
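/*
 * For illustration, a minimal sketch of the lookup side of this rbtree:
 * the same descent keyed by irq, taken under the same uv_irq_lock. The
 * function name and the -1 "not found" return convention are assumptions,
 * not taken from the listing above.
 */
static int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			/* Hand back the MMR offset/pnode recorded at setup time. */
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;
}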
/*
 * Legacy variant: allocate an irq with create_irq_nr(), program the MMR
 * through arch_enable_uv_irq(), and record the irq -> MMR mapping in the
 * rbtree above.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	int irq, ret;

	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));
	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
				 limit);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
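/*
 * A hedged sketch of the matching teardown for this legacy path: look the
 * irq up in the rbtree, undo the MMR programming, and free both the node
 * and the irq. arch_disable_uv_irq() is assumed here as the counterpart of
 * arch_enable_uv_irq() above; only create_irq_nr()/destroy_irq() and the
 * rbtree itself come from the listing.
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			arch_disable_uv_irq(e->pnode, e->offset); /* assumed helper */
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}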
/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	struct irq_alloc_info info;
	struct irq_domain *domain = uv_get_irq_domain();

	if (!domain)
		return -ENOMEM;

	init_irq_alloc_info(&info, cpumask_of(cpu));
	info.type = X86_IRQ_ALLOC_TYPE_UV;
	info.uv_limit = limit;
	info.uv_blade = mmr_blade;
	info.uv_offset = mmr_offset;
	info.uv_name = irq_name;

	return irq_domain_alloc_irqs(domain, 1,
				     uv_blade_to_memory_nid(mmr_blade), &info);
}
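/*
 * With the hierarchical irq domain carrying the MMR details in the
 * irq_alloc_info, per-irq bookkeeping such as the rbtree above is no longer
 * needed: the domain's free callback can undo the MMR programming. A sketch
 * of the corresponding teardown, using the stock irq_domain_free_irqs() API:
 */
void uv_teardown_irq(unsigned int irq)
{
	/* Frees the irq and runs the domain's free path for its per-irq data. */
	irq_domain_free_irqs(irq, 1);
}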