/*
 * Retarget an MSI interrupt at the first CPU of @cpu_mask.
 *
 * Rewrites the destination-ID field of the cached MSI address and the
 * vector field of the MSI data word, then pushes the new message to the
 * device and records the new affinity in irq_desc[].  Bails out silently
 * if the chosen CPU is offline or if the vector move cannot be prepared.
 */
static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
{
	struct msi_msg msg;
	u32 dest_addr, vec_data;
	int target = first_cpu(cpu_mask);

	if (!cpu_online(target))
		return;

	if (irq_prepare_move(irq, target))
		return;

	read_msi_msg(irq, &msg);

	/* Point the address at the new CPU's physical ID. */
	dest_addr = msg.address_lo;
	dest_addr &= MSI_ADDR_DESTID_MASK;
	dest_addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(target));
	msg.address_lo = dest_addr;

	/* Keep the data word's vector in sync with the irq's vector. */
	vec_data = msg.data;
	vec_data &= MSI_DATA_VECTOR_MASK;
	vec_data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = vec_data;

	write_msi_msg(irq, &msg);
	irq_desc[irq].affinity = cpumask_of_cpu(target);
}
int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) { struct msi_msg msg; unsigned long dest_phys_id; int irq, vector; cpumask_t mask; irq = create_irq(); if (irq < 0) return irq; set_irq_msi(irq, desc); cpus_and(mask, irq_to_domain(irq), cpu_online_map); dest_phys_id = cpu_physical_id(first_cpu(mask)); vector = irq_to_vector(irq); msg.address_hi = 0; msg.address_lo = MSI_ADDR_HEADER | MSI_ADDR_DESTMODE_PHYS | MSI_ADDR_REDIRECTION_CPU | MSI_ADDR_DESTID_CPU(dest_phys_id); msg.data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT | MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(vector); write_msi_msg(irq, &msg); set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); return 0; }
/*
 * Resend a (possibly lost) MSI interrupt by re-injecting its vector.
 * Always reports success to the genirq core.
 */
static int ia64_msi_retrigger_irq(unsigned int irq)
{
	ia64_resend_irq(irq_to_vector(irq));
	return 1;
}
/*
 * irq_data-based variant: resend the MSI interrupt for @data->irq by
 * re-injecting its vector.  Always reports success to the genirq core.
 */
static int ia64_msi_retrigger_irq(struct irq_data *data)
{
	ia64_resend_irq(irq_to_vector(data->irq));
	return 1;
}
/*
 * irq_data-based affinity setter: retarget the MSI interrupt at the
 * first online CPU in @cpu_mask.
 *
 * Patches the destination-ID field of the cached MSI address and the
 * vector field of the data word, writes the message back to the device,
 * and updates the recorded affinity.  Returns 0 on success or -1 if the
 * vector move could not be prepared.  @force is ignored, as on most
 * architectures of this vintage.
 */
static int ia64_set_msi_irq_affinity(struct irq_data *idata,
				     const cpumask_t *cpu_mask, bool force)
{
	struct msi_msg msg;
	u32 dest_addr, vec_data;
	unsigned int irq = idata->irq;
	int target = cpumask_first_and(cpu_mask, cpu_online_mask);

	if (irq_prepare_move(irq, target))
		return -1;

	__get_cached_msi_msg(idata->msi_desc, &msg);

	/* Swap in the new destination CPU's physical ID. */
	dest_addr = msg.address_lo;
	dest_addr &= MSI_ADDR_DEST_ID_MASK;
	dest_addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(target));
	msg.address_lo = dest_addr;

	/* Refresh the vector field to match the irq's current vector. */
	vec_data = msg.data;
	vec_data &= MSI_DATA_VECTOR_MASK;
	vec_data |= MSI_DATA_VECTOR(irq_to_vector(irq));
	msg.data = vec_data;

	write_msi_msg(irq, &msg);
	cpumask_copy(idata->affinity, cpumask_of(target));

	return 0;
}
/*
 * Check for a lost bridge interrupt and force it to be replayed.
 *
 * Compares the bridge's interrupt-status register against the CPU's
 * IRR (interrupt request register) bank for this irq's vector.  If the
 * bridge says the interrupt line is asserted but the vector is not
 * pending in the IRR, not soft-pending, and not currently in service,
 * the interrupt was lost and is forced via pcibr_force_interrupt().
 * The raw status is remembered in bi_last_intr for the next check.
 *
 * Fix: irr_reg was read after a switch with no default case — if
 * irr_reg_num ever fell outside 0..3 (it should not, since IA-64
 * vectors are < 256, giving bank 0..3) the read would be of an
 * uninitialized variable, which is undefined behavior.  The variable is
 * now zero-initialized and the switch has an explicit default.  The
 * irq_to_vector() lookup is also hoisted so it is computed once.
 */
static void sn_check_intr(int irq, pcibr_intr_t intr)
{
	unsigned long regval;
	unsigned long irr_reg = 0;	/* defensively initialized; see below */
	int vector;
	int irr_reg_num;
	int irr_bit;

	regval = intr->bi_soft->bs_base->p_int_status_64;

	vector = irq_to_vector(irq);
	irr_reg_num = vector / 64;	/* which 64-bit IRR bank */
	irr_bit = vector % 64;		/* bit within that bank */

	switch (irr_reg_num) {
	case 0:
		irr_reg = ia64_get_irr0();
		break;
	case 1:
		irr_reg = ia64_get_irr1();
		break;
	case 2:
		irr_reg = ia64_get_irr2();
		break;
	case 3:
		irr_reg = ia64_get_irr3();
		break;
	default:
		/*
		 * Cannot happen: IA-64 vectors are < 256, so the bank
		 * index is 0..3.  irr_reg stays 0 if it somehow does.
		 */
		break;
	}

	if (!test_bit(irr_bit, &irr_reg)) {
		if (!test_bit(irq, pda.sn_soft_irr)) {
			if (!test_bit(irq, pda.sn_in_service_ivecs)) {
				regval &= 0xff;
				if (intr->bi_ibits & regval & intr->bi_last_intr) {
					regval &= ~(intr->bi_ibits & regval);
					pcibr_force_interrupt(intr);
				}
			}
		}
	}

	intr->bi_last_intr = regval;
}
/* * Initialize vector_irq on a new cpu. This function must be called * with vector_lock held. */ void __setup_vector_irq(int cpu) { int irq, vector; /* Clear vector_irq */ for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) per_cpu(vector_irq, cpu)[vector] = -1; /* Mark the inuse vectors */ for (irq = 0; irq < NR_IRQS; ++irq) { if (!cpu_isset(cpu, irq_cfg[irq].domain)) continue; vector = irq_to_vector(irq); per_cpu(vector_irq, cpu)[vector] = irq; } }
/*
 * Arm the interval timer for this CPU: record the current ITC as the
 * next tick baseline and route the timer through the vector assigned to
 * the real-time tick irq.
 */
static void rthal_set_itv(void)
{
	int cpu = rthal_processor_id();

	rthal_itm_next[cpu] = ia64_get_itc();
	ia64_set_itv(irq_to_vector(rthal_tick_irq));
}