/*
 * hubii_eint_init: set up the Hub II I/O error interrupt for node @cnode.
 *
 * Reads the II local LLP control/status register; if the LLP link is down
 * the LLP is disabled and no interrupt is connected.  Otherwise a target
 * CPU and free interrupt bit are reserved via intr_heuristic(), the level
 * is connected (plus the synergy-side connect and request_irq hookup),
 * and finally the IIDSR on the hub is programmed to deliver IO error
 * interrupts to that cpu/bit.
 */
void
hubii_eint_init(cnodeid_t cnode)
{
	int			bit, rv;
	ii_iidsr_u_t		hubio_eint;
	hubinfo_t		hinfo;
	cpuid_t			intr_cpu;
	devfs_handle_t		hub_v;
	ii_ilcsr_u_t		ilcsr;
	int			bit_pos_to_irq(int bit);
	int			synergy_intr_connect(int bit, int cpuid);

	hub_v = (devfs_handle_t)cnodeid_to_vertex(cnode);
	ASSERT_ALWAYS(hub_v);
	hubinfo_get(hub_v, &hinfo);
	ASSERT(hinfo);
	ASSERT(hinfo->h_cnodeid == cnode);

	ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(hinfo->h_nasid, IIO_ILCSR);
	if ((ilcsr.ii_ilcsr_fld_s.i_llp_stat & 0x2) == 0) {
		/*
		 * HUB II link is not up.
		 * Just disable LLP, and don't connect any interrupts.
		 */
		ilcsr.ii_ilcsr_fld_s.i_llp_en = 0;
		REMOTE_HUB_S(hinfo->h_nasid, IIO_ILCSR, ilcsr.ii_ilcsr_regval);
		return;
	}

	/* Select a possible interrupt target where there is a free interrupt
	 * bit and also reserve the interrupt bit for this IO error interrupt
	 */
	intr_cpu = intr_heuristic(hub_v, 0, INTRCONNECT_ANYBIT, II_ERRORINT, hub_v,
				  "HUB IO error interrupt", &bit);
	if (intr_cpu == CPU_NONE) {
		/* Fixed: log level and trailing newline were missing. */
		printk(KERN_ERR "hubii_eint_init: intr_reserve_level failed, cnode %d\n",
		       cnode);
		return;
	}

	rv = intr_connect_level(intr_cpu, bit, 0, NULL);
	synergy_intr_connect(bit, intr_cpu);
	/* IRQ number encodes both the target cpu (high byte) and the bit. */
	request_irq(bit_pos_to_irq(bit) + (intr_cpu << 8), hubii_eint_handler,
		    0, "SN hub error", (void *)hub_v);
	ASSERT_ALWAYS(rv >= 0);

	/* Program the hub's IO error interrupt dispatch register. */
	hubio_eint.ii_iidsr_regval = 0;
	hubio_eint.ii_iidsr_fld_s.i_enable = 1;
	hubio_eint.ii_iidsr_fld_s.i_level = bit;	/* Take the least significant bits */
	hubio_eint.ii_iidsr_fld_s.i_node = COMPACT_TO_NASID_NODEID(cnode);
	hubio_eint.ii_iidsr_fld_s.i_pi_id = cpuid_to_subnode(intr_cpu);
	REMOTE_HUB_S(hinfo->h_nasid, IIO_IIDSR, hubio_eint.ii_iidsr_regval);
}
/*
 * hubii_eint_init (SN2 variant): set up the Hub II I/O error interrupt
 * for node @cnode.
 *
 * Unlike the SN1 variant, a down LLP link only disables the LLP; we fall
 * through and still connect the error interrupt (per the original comment,
 * interrupts remain enabled to handle BTE errors).  The interrupt uses the
 * fixed SGI_II_ERROR vector, shared across hubs (SA_SHIRQ), and the irq
 * descriptor is marked per-hub.
 */
void
hubii_eint_init(cnodeid_t cnode)
{
	int			bit, rv;
	ii_iidsr_u_t		hubio_eint;
	hubinfo_t		hinfo;
	cpuid_t			intr_cpu;
	vertex_hdl_t		hub_v;
	ii_ilcsr_u_t		ilcsr;

	hub_v = (vertex_hdl_t)cnodeid_to_vertex(cnode);
	ASSERT_ALWAYS(hub_v);
	hubinfo_get(hub_v, &hinfo);
	ASSERT(hinfo);
	ASSERT(hinfo->h_cnodeid == cnode);

	ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(hinfo->h_nasid, IIO_ILCSR);
	if ((ilcsr.ii_ilcsr_fld_s.i_llp_stat & 0x2) == 0) {
		/*
		 * HUB II link is not up.  Disable LLP.  Clear old errors.
		 * Enable interrupts to handle BTE errors.
		 */
		ilcsr.ii_ilcsr_fld_s.i_llp_en = 0;
		REMOTE_HUB_S(hinfo->h_nasid, IIO_ILCSR, ilcsr.ii_ilcsr_regval);
	}

	/* Select a possible interrupt target where there is a free interrupt
	 * bit and also reserve the interrupt bit for this IO error interrupt
	 */
	intr_cpu = intr_heuristic(hub_v, 0, SGI_II_ERROR, 0, hub_v,
				  "HUB IO error interrupt", &bit);
	if (intr_cpu == CPU_NONE) {
		/* Fixed: log level and trailing newline were missing. */
		printk(KERN_ERR "hubii_eint_init: intr_reserve_level failed, cnode %d\n",
		       cnode);
		return;
	}

	rv = intr_connect_level(intr_cpu, SGI_II_ERROR, 0, NULL);
	/*
	 * NOTE(review): request_irq()'s return value is not checked here —
	 * confirm whether a failure should undo the reservation above.
	 */
	request_irq(SGI_II_ERROR, hubii_eint_handler, SA_SHIRQ,
		    "SN_hub_error", (void *)hub_v);
	irq_desc(bit)->status |= SN2_IRQ_PER_HUB;
	ASSERT_ALWAYS(rv >= 0);

	/* Program the hub's IO error interrupt dispatch register. */
	hubio_eint.ii_iidsr_regval = 0;
	hubio_eint.ii_iidsr_fld_s.i_enable = 1;
	hubio_eint.ii_iidsr_fld_s.i_level = bit;	/* Take the least significant bits */
	hubio_eint.ii_iidsr_fld_s.i_node = COMPACT_TO_NASID_NODEID(cnode);
	hubio_eint.ii_iidsr_fld_s.i_pi_id = cpuid_to_subnode(intr_cpu);
	REMOTE_HUB_S(hinfo->h_nasid, IIO_IIDSR, hubio_eint.ii_iidsr_regval);
}
/*
 * Set up the platform-dependent fields in the processor pda.
 * Must be done _after_ init_platform_nodepda().
 * If we need a lock here, something else is wrong!
 *
 * On SN1 this clears the per-synergy INT_PEND0/INT_PEND1 masks and points
 * the dispatch vectors at the subnode pda's dispatch blocks.  The pointers
 * are computed twice — once via NODEPDA and once via SUBNODEPDA — and
 * cross-checked, apparently as scaffolding while SUBNODEPDA was being
 * brought up (see the "Cant use SUBNODEPDA" comment).
 */
void init_platform_pda(cpuid_t cpu)
{
#if defined(CONFIG_IA64_SGI_SN1)
	hub_intmasks_t *intmasks;
	int i, subnode;
	cnodeid_t cnode;
	synergy_da_t *sda;
	int which_synergy;

	cnode = cpuid_to_cnodeid(cpu);
	which_synergy = cpuid_to_synergy(cpu);
	sda = Synergy_da_indr[(cnode * 2) + which_synergy];
	intmasks = &sda->s_intmasks;

	/* Clear INT_PEND0 masks. */
	for (i = 0; i < N_INTPEND0_MASKS; i++)
		intmasks->intpend0_masks[i] = 0;

	/* Set up pointer to the vector block in the nodepda. */
	/* (Cant use SUBNODEPDA - not working yet) */
	subnode = cpuid_to_subnode(cpu);
	intmasks->dispatch0 =
		&NODEPDA(cnode)->snpda[cpuid_to_subnode(cpu)].intr_dispatch0;
	intmasks->dispatch1 =
		&NODEPDA(cnode)->snpda[cpuid_to_subnode(cpu)].intr_dispatch1;
	/*
	 * Sanity-check that both ways of locating the dispatch blocks agree.
	 * Fixed: the panic message used to be the unhelpful "xxx".
	 */
	if (intmasks->dispatch0 != &SUBNODEPDA(cnode, subnode)->intr_dispatch0 ||
	    intmasks->dispatch1 != &SUBNODEPDA(cnode, subnode)->intr_dispatch1)
		panic("init_platform_pda: SUBNODEPDA dispatch pointers disagree "
		      "with NODEPDA for cpu %ld", (long)cpu);
	intmasks->dispatch0 = &SUBNODEPDA(cnode, subnode)->intr_dispatch0;
	intmasks->dispatch1 = &SUBNODEPDA(cnode, subnode)->intr_dispatch1;

	/* Clear INT_PEND1 masks. */
	for (i = 0; i < N_INTPEND1_MASKS; i++)
		intmasks->intpend1_masks[i] = 0;
#endif /* CONFIG_IA64_SGI_SN1 */
}
/*
 * sn_end_irq: end-of-interrupt hook for SN platforms.
 *
 * For the UART interrupt only, re-check the hardware pending state after
 * the handler has run; if the UART still has work pending, send ourselves
 * an IPI on the UART vector so the driver gets another chance.  This
 * guards against interrupts the driver may have missed while the line
 * was being serviced.
 */
static void sn_end_irq(unsigned int irq)
{
#ifdef CONFIG_IA64_SGI_SN1
	/* mask 0x70 selects the UART-related bits in PI_INT_PEND0
	 * (NOTE(review): exact bit meanings come from headers not visible
	 * here — confirm). */
	unsigned long long intpend_val, mask = 0x70L;
	int subnode;
#endif
	int nasid;
#ifdef CONFIG_IA64_SGI_SN2
	unsigned long event_occurred;
#endif

	/* Strip the cpu encoding from the irq number; keep only the vector. */
	irq = irq & 0xff;
#ifdef CONFIG_IA64_SGI_SN1
	if (irq == SGI_UART_IRQ) {
		nasid = smp_physical_node_id();
		subnode = cpuid_to_subnode(smp_processor_id());
		intpend_val = REMOTE_HUB_PI_L(nasid, subnode, PI_INT_PEND0);
		if (intpend_val & mask) {
			/* Still pending: re-raise the UART interrupt via IPI. */
			platform_send_ipi(smp_processor_id(), SGI_UART_IRQ,
					  IA64_IPI_DM_INT, 0);
		}
	}
#endif
#ifdef CONFIG_IA64_SGI_SN2
	if (irq == SGI_UART_VECTOR) {
		nasid = smp_physical_node_id();
		event_occurred =
			HUB_L((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
		/*
		 * If the UART bit is set here, we may have received an
		 * interrupt from the UART that the driver missed.  To make
		 * sure, we IPI ourselves to force us to look again.
		 */
		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
					  IA64_IPI_DM_INT, 0);
		}
	}
#endif
}
/*
 * sn_ack_irq: acknowledge an interrupt at the hub/shub.
 *
 * SN1: for the UART irq, clear every related pending source (graphics A/B,
 * page migration, CC A/B) individually in PI_INT_PEND0; for any other irq,
 * clear its single bit.  SN2: read SH_EVENT_OCCURRED, build a mask of the
 * sources that fired (UART, IPI, II0, II1), and write it to the ALIAS
 * register to clear exactly those bits.
 */
static void sn_ack_irq(unsigned int irq)
{
#ifdef CONFIG_IA64_SGI_SN1
	int bit = -1;
	unsigned long long intpend_val;
	int subnode;
#endif
#ifdef CONFIG_IA64_SGI_SN2
	unsigned long event_occurred, mask = 0;
#endif
	int nasid;

	/* Strip the cpu encoding; keep only the vector number. */
	irq = irq & 0xff;
	nasid = smp_physical_node_id();
#ifdef CONFIG_IA64_SGI_SN1
	subnode = cpuid_to_subnode(smp_processor_id());
	if (irq == SGI_UART_IRQ) {
		/* The UART irq is shared: ack each possible source we find set. */
		intpend_val = REMOTE_HUB_PI_L(nasid, subnode, PI_INT_PEND0);
		if (intpend_val & (1L<<GFX_INTR_A) ) {
			bit = GFX_INTR_A;
			REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
		}
		if ( intpend_val & (1L<<GFX_INTR_B) ) {
			bit = GFX_INTR_B;
			REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
		}
		if (intpend_val & (1L<<PG_MIG_INTR) ) {
			bit = PG_MIG_INTR;
			REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
		}
		if (intpend_val & (1L<<CC_PEND_A)) {
			bit = CC_PEND_A;
			REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
		}
		if (intpend_val & (1L<<CC_PEND_B)) {
			bit = CC_PEND_B;
			REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
		}
		return;
	}
	/* Ordinary irq: clear its single pending bit. */
	bit = irq_to_bit_pos(irq);
	REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
#endif

#ifdef CONFIG_IA64_SGI_SN2
	event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) );
	/*
	 * NOTE(review): these shifts use int-width (1 << SHFT); if any
	 * SH_EVENT_OCCURRED_*_SHFT is >= 31 this would be wrong for the
	 * unsigned long mask — confirm the shift values in the shub headers.
	 */
	if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
	}
	/* Writing the ALIAS register clears exactly the bits in mask. */
	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), mask );
#endif
}
/*
 * Allocate resources required for an interrupt as specified in dev_desc.
 * Returns a hub interrupt handle on success, or 0 on failure.
 *
 * Flow: derive reservation flags from the device descriptor (error intr vs.
 * threaded vs. neither), pick a target cpu + interrupt bit via
 * intr_heuristic(), then allocate and fill in a hub_intr_s handle with both
 * the generic xtalk info and the low-level hub info, and finally record the
 * choice back into the descriptor and the synergy bookkeeping.
 */
static hub_intr_t
do_hub_intr_alloc(devfs_handle_t dev,		/* which crosstalk device */
		  device_desc_t dev_desc,	/* device descriptor */
		  devfs_handle_t owner_dev,	/* owner of this interrupt, if known */
		  int uncond_nothread)		/* unconditionally non-threaded */
{
	cpuid_t cpu = (cpuid_t)0;		/* cpu to receive interrupt */
	int cpupicked = 0;
	int bit;				/* interrupt vector */
	/*REFERENCED*/
	int intr_resflags = 0;
	hub_intr_t intr_hdl;
	cnodeid_t nodeid;			/* node to receive interrupt */
	/*REFERENCED*/
	nasid_t nasid;				/* nasid to receive interrupt */
	struct xtalk_intr_s *xtalk_info;
	iopaddr_t xtalk_addr;			/* xtalk addr on hub to set intr */
	xwidget_info_t xwidget_info;		/* standard crosstalk widget info handle */
	char *intr_name = NULL;
	ilvl_t intr_swlevel = (ilvl_t)0;
	extern int default_intr_pri;
	extern void synergy_intr_alloc(int, int);

	/* Choose reservation flags from the descriptor, if one was given. */
	if (dev_desc) {
		if (dev_desc->flags & D_INTR_ISERR) {
			intr_resflags = II_ERRORINT;
		} else if (!uncond_nothread && !(dev_desc->flags & D_INTR_NOTHREAD)) {
			intr_resflags = II_THREADED;
		} else {
			/* Neither an error nor a thread. */
			intr_resflags = 0;
		}
	} else {
		/* No descriptor: use the default priority, threaded unless forbidden. */
		intr_swlevel = default_intr_pri;
		if (!uncond_nothread)
			intr_resflags = II_THREADED;
	}

	/* XXX - Need to determine if the interrupt should be threaded. */

	/* If the cpu has not been picked already then choose a candidate
	 * interrupt target and reserve the interrupt bit
	 *
	 * NOTE(review): cpupicked is initialized to 0 and never set in this
	 * function, so this branch always runs and the intr_reserve_level
	 * branch below appears dead — confirm against the full file/history.
	 */
	if (!cpupicked) {
		cpu = intr_heuristic(dev,dev_desc,allocate_my_bit,
				     intr_resflags,owner_dev,
				     intr_name,&bit);
	}

	/* At this point we SHOULD have a valid cpu */
	if (cpu == CPU_NONE) {
#if defined(SUPPORT_PRINTING_V_FORMAT)
		printk(KERN_WARNING "%v hub_intr_alloc could not allocate interrupt\n",
		       owner_dev);
#else
		printk(KERN_WARNING "%p hub_intr_alloc could not allocate interrupt\n",
		       (void *)owner_dev);
#endif
		return(0);
	}

	/* If the cpu has been picked already (due to the bridge data
	 * corruption bug) then try to reserve an interrupt bit .
	 */
	if (cpupicked) {
		bit = intr_reserve_level(cpu, allocate_my_bit,
					 intr_resflags, owner_dev, intr_name);
		if (bit < 0) {
#if defined(SUPPORT_PRINTING_V_FORMAT)
			printk(KERN_WARNING "Could not reserve an interrupt bit for cpu "
			       " %d and dev %v\n", cpu,owner_dev);
#else
			printk(KERN_WARNING "Could not reserve an interrupt bit for cpu "
			       " %d and dev %p\n", (int)cpu, (void *)owner_dev);
#endif
			return(0);
		}
	}

	nodeid = cpuid_to_cnodeid(cpu);
	nasid = cpuid_to_nasid(cpu);
	/* Address the target cpu's PI_INT_PEND_MOD register via xtalk space. */
	xtalk_addr = HUBREG_AS_XTALKADDR(nasid,
					 PIREG(PI_INT_PEND_MOD, cpuid_to_subnode(cpu)));

	/*
	 * Allocate an interrupt handle, and fill it in.  There are two
	 * pieces to an interrupt handle: the piece needed by generic
	 * xtalk code which is used by crosstalk device drivers, and
	 * the piece needed by low-level IP27 hardware code.
	 *
	 * NOTE(review): allocation failure is only caught by ASSERT_ALWAYS;
	 * confirm that KM_NOSLEEP failure cannot occur here in production
	 * builds, or the fields below would be written through NULL.
	 */
	intr_hdl = snia_kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, nodeid);
	ASSERT_ALWAYS(intr_hdl);

	/*
	 * Fill in xtalk information for generic xtalk interfaces that
	 * operate on xtalk_intr_hdl's.
	 */
	xtalk_info = &intr_hdl->i_xtalk_info;
	xtalk_info->xi_dev = dev;
	xtalk_info->xi_vector = bit;
	xtalk_info->xi_addr = xtalk_addr;

	/*
	 * Regardless of which CPU we ultimately interrupt, a given crosstalk
	 * widget always handles interrupts (and PIO and DMA) through its
	 * designated "master" crosstalk provider.
	 */
	xwidget_info = xwidget_info_get(dev);
	if (xwidget_info)
		xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);

	/* Fill in low level hub information for hub_* interrupt interface */
	intr_hdl->i_swlevel = intr_swlevel;
	intr_hdl->i_cpuid = cpu;
	intr_hdl->i_bit = bit;
	intr_hdl->i_flags = HUB_INTR_IS_ALLOCED;

	/* Store the actual interrupt priority level & interrupt target
	 * cpu back in the device descriptor.
	 */
	hub_device_desc_update(dev_desc, intr_swlevel, cpu);
	synergy_intr_alloc((int)bit, (int)cpu);

	return(intr_hdl);
}