void __cpuinit per_cpu_init(void)
{
        int cpu = smp_processor_id();
        int slice = LOCAL_HUB_L(PI_CPU_NUM);
        cnodeid_t cnode = get_compact_nodeid();
        struct hub_data *hub = hub_data(cnode);
        struct slice_data *si = hub->slice + slice;
        int i;

        if (test_and_set_bit(slice, &hub->slice_map))
                return;

        clear_c0_status(ST0_IM);

        per_hub_init(cnode);

        for (i = 0; i < LEVELS_PER_SLICE; i++)
                si->level_to_irq[i] = -1;

        /*
         * We use this so we can find the local hub's data as fast as
         * possible.
         */
        cpu_data[cpu].data = si;

        cpu_time_init();
        install_ipi();

        /* Install our NMI handler if symmon hasn't installed one. */
        install_cpu_nmi_handler(cputoslice(cpu));

        set_c0_status(SRB_DEV0 | SRB_DEV1);
}
int request_bridge_irq(struct bridge_controller *bc, int pin)
{
        struct hub_irq_data *hd;
        struct hub_data *hub;
        struct irq_desc *desc;
        int swlevel;
        int irq;

        hd = kzalloc(sizeof(*hd), GFP_KERNEL);
        if (!hd)
                return -ENOMEM;

        swlevel = alloc_level();
        if (unlikely(swlevel < 0)) {
                kfree(hd);
                return -EAGAIN;
        }
        irq = swlevel + IP27_HUB_IRQ_BASE;

        hd->bc = bc;
        hd->bit = swlevel;
        hd->pin = pin;
        irq_set_chip_data(irq, hd);

        /* use CPU connected to nearest hub */
        hub = hub_data(NASID_TO_COMPACT_NODEID(bc->nasid));
        setup_hub_mask(hd, &hub->h_cpus);

        desc = irq_to_desc(irq);
        desc->irq_common_data.node = bc->nasid;
        cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);

        return irq;
}
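/*
 * Illustrative sketch only (not from the tree): one way a MIPS
 * pcibios_map_irq() hook might hand a device's interrupt off to
 * request_bridge_irq() above.  The BRIDGE_CONTROLLER() lookup and the
 * use of the slot number as the bridge pin are assumptions made for
 * this example.
 */
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        /* assumed helper: find the bridge that owns this PCI bus */
        struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
        int irq = request_bridge_irq(bc, slot);

        /* the PCI core treats 0 as "no interrupt" */
        return irq < 0 ? 0 : irq;
}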
static void __cpuinit per_hub_init(cnodeid_t cnode)
{
        struct hub_data *hub = hub_data(cnode);
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
        int i;

        cpu_set(smp_processor_id(), hub->h_cpus);

        if (test_and_set_bit(cnode, hub_init_mask))
                return;

        /*
         * Set CRB timeout at 5ms (< PI timeout of 10ms)
         */
        REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
        REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);

        hub_rtc_init(cnode);
        xtalk_probe_node(cnode);

#ifdef CONFIG_REPLICATE_EXHANDLERS
        /*
         * If this is not a headless node initialization,
         * copy over the caliased exception handlers.
         */
        if (get_compact_nodeid() == cnode) {
                extern char except_vec2_generic, except_vec3_generic;
                extern void build_tlb_refill_handler(void);

                memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
                memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
                build_tlb_refill_handler();
                memcpy((void *)(CKSEG0 + 0x100), (void *)CKSEG0, 0x80);
                memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
                __flush_cache_all();
        }
#endif

        /*
         * Some interrupts are reserved by hardware or by software convention.
         * Mark these as reserved right away so they won't be used
         * accidentally later.
         */
        for (i = 0; i <= BASE_PCI_IRQ; i++) {
                __set_bit(i, hub->irq_alloc_mask);
                LOCAL_HUB_CLR_INTR(INT_PEND0_BASELVL + i);
        }

        __set_bit(IP_PEND0_6_63, hub->irq_alloc_mask);
        LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);

        for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
                __set_bit(i, hub->irq_alloc_mask);
                LOCAL_HUB_CLR_INTR(INT_PEND1_BASELVL + i);
        }
}
/*
 * hub_pio_init  -  PIO-related hub initialization
 *
 * @cnode: compact node ID of the hub to initialize
 */
void hub_pio_init(cnodeid_t cnode)
{
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
        unsigned i;

        /* initialize big window piomaps for this hub */
        bitmap_zero(hub_data(cnode)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
        for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
                IIO_ITTE_DISABLE(nasid, i);

        hub_set_piomode(nasid);
}
void __init per_cpu_init(void)
{
        int cpu = smp_processor_id();
        int slice = LOCAL_HUB_L(PI_CPU_NUM);
        cnodeid_t cnode = get_compact_nodeid();
        struct hub_data *hub = hub_data(cnode);
        struct slice_data *si = hub->slice + slice;
        int i;

        if (test_and_set_bit(slice, &hub->slice_map))
                return;

        clear_c0_status(ST0_IM);

        for (i = 0; i < LEVELS_PER_SLICE; i++)
                si->level_to_irq[i] = -1;

        /*
         * Some interrupts are reserved by hardware or by software convention.
         * Mark these as reserved right away so they won't be used
         * accidentally later.
         */
        for (i = 0; i <= BASE_PCI_IRQ; i++) {
                __set_bit(i, si->irq_alloc_mask);
                LOCAL_HUB_S(PI_INT_PEND_MOD, i);
        }

        __set_bit(IP_PEND0_6_63, si->irq_alloc_mask);
        LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);

        for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
                __set_bit(i, si->irq_alloc_mask + 1);
                LOCAL_HUB_S(PI_INT_PEND_MOD, i);
        }

        LOCAL_HUB_L(PI_INT_PEND0);

        /*
         * We use this so we can find the local hub's data as fast as
         * possible.
         */
        cpu_data[cpu].data = si;

        cpu_time_init();
        install_ipi();

        /* Install our NMI handler if symmon hasn't installed one. */
        install_cpu_nmi_handler(cputoslice(cpu));

        set_c0_status(SRB_DEV0 | SRB_DEV1);

        per_hub_init(cnode);
}
/**
 * hub_pio_map  -  establish a HUB PIO mapping
 *
 * @cnode: compact node ID of the hub to map through
 * @widget: widget ID to perform PIO mapping for
 * @xtalk_addr: xtalk_address that needs to be mapped
 * @size: size of the PIO mapping
 **/
unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
                          unsigned long xtalk_addr, size_t size)
{
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
        volatile hubreg_t junk;
        unsigned i;

        /* use small-window mapping if possible */
        if ((xtalk_addr % SWIN_SIZE) + size <= SWIN_SIZE)
                return NODE_SWIN_BASE(nasid, widget) + (xtalk_addr % SWIN_SIZE);

        if ((xtalk_addr % BWIN_SIZE) + size > BWIN_SIZE) {
                printk(KERN_WARNING "PIO mapping at hub %d widget %d addr "
                       "0x%lx too big (%ld)\n",
                       nasid, widget, xtalk_addr, size);
                return 0;
        }

        xtalk_addr &= ~(BWIN_SIZE-1);
        for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
                if (test_and_set_bit(i, hub_data(cnode)->h_bigwin_used))
                        continue;

                /*
                 * The code below does a PIO write to setup an ITTE entry.
                 *
                 * We need to prevent other CPUs from seeing our updated
                 * memory shadow of the ITTE (in the piomap) until the ITTE
                 * entry is actually set up; otherwise, another CPU might
                 * attempt a PIO prematurely.
                 *
                 * Also, the only way we can know that an entry has been
                 * received by the hub and can be used by future PIO reads/
                 * writes is by reading back the ITTE entry after writing it.
                 *
                 * For these two reasons, we PIO read back the ITTE entry
                 * after we write it.
                 */
                IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
                junk = HUB_L(IIO_ITTE_GET(nasid, i));

                return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
        }

        printk(KERN_WARNING "unable to establish PIO mapping at hub %d "
               "widget %d addr 0x%lx\n", nasid, widget, xtalk_addr);
        return 0;
}
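/*
 * Minimal usage sketch (illustrative, not from the tree): map a page of
 * a widget's register space and read one 32-bit register through the
 * returned uncached address.  The helper name and register offset are
 * placeholders; a request this small is served by a small window, which
 * needs no teardown, so nothing is unmapped here.
 */
static u32 widget_read_reg(cnodeid_t cnode, xwidgetnum_t widget,
                           unsigned long offset)
{
        /* request a mapping covering the first page of widget space */
        unsigned long base = hub_pio_map(cnode, widget, 0, PAGE_SIZE);

        if (!base)
                return ~0U;     /* hub_pio_map() already logged the failure */

        return *(volatile u32 *)(base + offset);
}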
static inline int alloc_level(int cpu, int irq)
{
        struct hub_data *hub = hub_data(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;
        int level;

        level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
        if (level >= LEVELS_PER_SLICE)
                panic("Cpu %d flooded with devices", cpu);

        __set_bit(level, hub->irq_alloc_mask);
        si->level_to_irq[level] = irq;

        return level;
}
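/*
 * Hypothetical caller sketch: pairing a Linux irq number with a hub
 * interrupt level via alloc_level() above.  The intr_connect_level()
 * step mirrors how bridge setup code of this vintage armed the level;
 * treat the function names and the missing error handling as
 * assumptions made for illustration.
 */
static int connect_bridge_level(struct bridge_controller *bc, int irq)
{
        int cpu = bc->irq_cpu;                  /* CPU nearest this bridge */
        int swlevel = alloc_level(cpu, irq);    /* reserve a hub level */

        /* arm the PI so this level raises interrupts on that CPU */
        intr_connect_level(cpu, swlevel);

        return swlevel;
}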
static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid)
{
        kern_vars_t *kvp;

        kvp = &hub_data(client_nasid)->kern_vars;

        KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp;

        kvp->kv_magic = KV_MAGIC;
        kvp->kv_ro_nasid = server_nasid;
        kvp->kv_rw_nasid = master_nasid;
        kvp->kv_ro_baseaddr = NODE_CAC_BASE(server_nasid);
        kvp->kv_rw_baseaddr = NODE_CAC_BASE(master_nasid);
        printk("REPLICATION: ON nasid %d, ktext from nasid %d, kdata from nasid %d\n",
               client_nasid, server_nasid, master_nasid);
}
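/*
 * Illustrative sketch of a replication pass built on set_ktext_source():
 * the master node serves its own text, and every other node is pointed
 * at the most recent nasid known to hold a copy.  copy_kernel_text() is
 * a hypothetical helper assumed to copy the text to a node and report
 * success.
 */
static void __init replicate_kernel_text_sketch(void)
{
        nasid_t client_nasid, server_nasid = master_nasid;

        /* the master always reads its own text */
        set_ktext_source(master_nasid, master_nasid);

        for_each_online_node(client_nasid) {
                if (client_nasid == master_nasid)
                        continue;

                /* nodes that received a copy can serve later nodes too */
                if (copy_kernel_text(client_nasid))
                        server_nasid = client_nasid;

                set_ktext_source(client_nasid, server_nasid);
        }
}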
static void __init per_hub_init(cnodeid_t cnode)
{
        struct hub_data *hub = hub_data(cnode);
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);

        cpu_set(smp_processor_id(), hub->h_cpus);

        if (test_and_set_bit(cnode, hub_init_mask))
                return;

        /*
         * Set CRB timeout at 5ms (< PI timeout of 10ms)
         */
        REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
        REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);

        hub_rtc_init(cnode);
        xtalk_probe_node(cnode);

#ifdef CONFIG_REPLICATE_EXHANDLERS
        /*
         * If this is not a headless node initialization,
         * copy over the caliased exception handlers.
         */
        if (get_compact_nodeid() == cnode) {
                extern char except_vec2_generic, except_vec3_generic;
                extern void build_tlb_refill_handler(void);

                memcpy((void *)(CKSEG0 + 0x100), &except_vec2_generic, 0x80);
                memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x80);
                build_tlb_refill_handler();
                memcpy((void *)(CKSEG0 + 0x100), (void *)CKSEG0, 0x80);
                memcpy((void *)(CKSEG0 + 0x180), &except_vec3_generic, 0x100);
                __flush_cache_all();
        }
#endif
}