/**
 * mmtimer_init - device initialization routine
 *
 * Does initial setup for the mmtimer device.
 */
static int __init mmtimer_init(void)
{
	if (!ia64_platform_is("sn2"))
		return -1;

	/*
	 * Sanity check the cycles/sec variable
	 */
	if (sn_rtc_cycles_per_second < 100000) {
		printk(KERN_ERR "%s: unable to determine clock frequency\n",
		       MMTIMER_NAME);
		return -1;
	}

	mmtimer_femtoperiod = ((unsigned long)1E15 +
			       sn_rtc_cycles_per_second / 2) /
			      sn_rtc_cycles_per_second;

	strcpy(mmtimer_miscdev.devfs_name, MMTIMER_NAME);
	if (misc_register(&mmtimer_miscdev)) {
		printk(KERN_ERR "%s: failed to register device\n",
		       MMTIMER_NAME);
		return -1;
	}

	printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION,
	       sn_rtc_cycles_per_second / (unsigned long)1E6);

	return 0;
}
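/*
 * Worked example (not from the source): the femtoperiod computation above
 * is a round-to-nearest integer division of 1e15 femtoseconds by the RTC
 * rate. A minimal user-space sketch, assuming a hypothetical 50 MHz clock:
 */
#include <stdio.h>

int main(void)
{
	unsigned long rate = 50000000UL;	/* hypothetical 50 MHz RTC */
	/* (1e15 + rate/2) / rate rounds to the nearest femtosecond */
	unsigned long femtoperiod = (1000000000000000UL + rate / 2) / rate;

	printf("%lu fs per tick\n", femtoperiod);	/* prints 20000000, i.e. 20 ns */
	return 0;
}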
int __init prominfo_init(void)
{
	struct proc_dir_entry **entp;
	cnodeid_t cnodeid;
	unsigned long nasid;
	int size;
	char name[NODE_NAME_LEN];

	if (!ia64_platform_is("sn2"))
		return 0;

	size = num_online_nodes() * sizeof(struct proc_dir_entry *);
	proc_entries = kzalloc(size, GFP_KERNEL);
	if (!proc_entries)
		return -ENOMEM;

	sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);

	entp = proc_entries;
	for_each_online_node(cnodeid) {
		sprintf(name, "node%d", cnodeid);
		*entp = proc_mkdir(name, sgi_prominfo_entry);
		nasid = cnodeid_to_nasid(cnodeid);
		create_proc_read_entry("fit", 0, *entp, read_fit_entry,
				       (void *)nasid);
		create_proc_read_entry("version", 0, *entp,
				       read_version_entry, (void *)nasid);
		entp++;
	}

	return 0;
}
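/*
 * For context: create_proc_read_entry() takes a legacy procfs read_proc
 * callback. A minimal sketch of the shape read_fit_entry/read_version_entry
 * would have (the body here is illustrative, not the driver's actual code):
 */
static int example_read_entry(char *page, char **start, off_t off,
			      int count, int *eof, void *data)
{
	unsigned long nasid = (unsigned long)data;	/* as passed above */
	int len;

	/* format at most one page of output for the proc file */
	len = snprintf(page, PAGE_SIZE, "nasid %lu\n", nasid);
	*eof = 1;
	return len;
}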
/*
 * mspec_init
 *
 * Called at boot time to initialize the mspec facility.
 */
static int __init mspec_init(void)
{
	int ret;
	int nid;

	/*
	 * The fetchop device only works on SN2 hardware; the uncached and
	 * cached memory drivers should both be valid on all ia64 hardware.
	 */
#ifdef CONFIG_SGI_SN
	if (ia64_platform_is("sn2")) {
		is_sn2 = 1;
		if (is_shub2()) {
			ret = -ENOMEM;
			for_each_node_state(nid, N_ONLINE) {
				int actual_nid;
				int nasid;
				unsigned long phys;

				scratch_page[nid] = uncached_alloc_page(nid, 1);
				if (scratch_page[nid] == 0)
					goto free_scratch_pages;
				phys = __pa(scratch_page[nid]);
				nasid = get_node_number(phys);
				actual_nid = nasid_to_cnodeid(nasid);
				if (actual_nid != nid)
					goto free_scratch_pages;
			}
		}
static void __init
check_versions(struct ia64_sal_systab *systab)
{
	sal_revision = (systab->sal_rev_major << 8) | systab->sal_rev_minor;
	sal_version = (systab->sal_b_rev_major << 8) | systab->sal_b_rev_minor;

	/* Check for broken firmware */
	if ((sal_revision == SAL_VERSION_CODE(49, 29)) &&
	    (sal_version == SAL_VERSION_CODE(49, 29))) {
		/*
		 * Old firmware for zx2000 prototypes has this weird
		 * version number; reset it to something sane.
		 */
		sal_revision = SAL_VERSION_CODE(2, 8);
		sal_version = SAL_VERSION_CODE(0, 0);
	}

	if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9)))
		/*
		 * SGI Altix has hard-coded version 2.9 in its PROM but
		 * actually implements 3.2, so fix it up here.
		 */
		sal_revision = SAL_VERSION_CODE(3, 2);
}
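/*
 * For reference (a sketch, assuming SAL_VERSION_CODE has its usual ia64
 * definition as a BCD-packed major/minor pair): the systab fields compared
 * above are BCD bytes, so the "49.29" the broken zx2000 firmware reports is
 * simply the raw byte pattern 0x49 0x29.
 */
#define EXAMPLE_BIN2BCD(x)	((((x) / 10) << 4) | ((x) % 10))
#define EXAMPLE_SAL_VERSION_CODE(major, minor) \
	((EXAMPLE_BIN2BCD(major) << 8) | EXAMPLE_BIN2BCD(minor))

/* EXAMPLE_SAL_VERSION_CODE(3, 2)   == 0x0302 */
/* EXAMPLE_SAL_VERSION_CODE(49, 29) == 0x4929 */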
/* must be called with cpucontrol mutex held */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Don't permit removal of the boot processor for now.
	 */
	if (cpu == 0 && !bsp_remove_ok) {
		printk("Your platform does not support removal of BSP\n");
		return -EBUSY;
	}

	if (ia64_platform_is("sn2")) {
		if (!sn_cpu_disable_allowed(cpu))
			return -EBUSY;
	}

	set_cpu_online(cpu, false);

	if (migrate_platform_irqs(cpu)) {
		/* could not migrate this CPU's IRQs; put it back online */
		set_cpu_online(cpu, true);
		return -EBUSY;
	}

	remove_siblinginfo(cpu);
	fixup_irqs();
	local_flush_tlb_all();
	cpu_clear(cpu, cpu_callin_map);
	return 0;
}
static int __init simrs_init(void)
{
	struct serial_state *state;
	int retval;

	if (!ia64_platform_is("hpsim"))
		return -ENODEV;

	hp_simserial_driver = alloc_tty_driver(NR_PORTS);
	if (!hp_simserial_driver)
		return -ENOMEM;

	printk(KERN_INFO "SimSerial driver with no serial options enabled\n");

	/* Initialize the tty_driver structure */
	hp_simserial_driver->driver_name = "simserial";
	hp_simserial_driver->name = "ttyS";
	hp_simserial_driver->major = TTY_MAJOR;
	hp_simserial_driver->minor_start = 64;
	hp_simserial_driver->type = TTY_DRIVER_TYPE_SERIAL;
	hp_simserial_driver->subtype = SERIAL_TYPE_NORMAL;
	hp_simserial_driver->init_termios = tty_std_termios;
	hp_simserial_driver->init_termios.c_cflag =
		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	hp_simserial_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(hp_simserial_driver, &hp_ops);

	state = rs_table;
	tty_port_init(&state->port);
	state->port.ops = &hp_port_ops;
	state->port.close_delay = 0; /* XXX really 0? */

	retval = hpsim_get_irq(KEYBOARD_INTR);
	if (retval < 0) {
		printk(KERN_ERR "%s: out of interrupt vectors!\n", __func__);
		goto err_free_tty;
	}
	state->irq = retval;

	/* the port is imaginary */
	printk(KERN_INFO "ttyS0 at 0x03f8 (irq = %d) is a 16550\n", state->irq);

	tty_port_link_device(&state->port, hp_simserial_driver, 0);
	retval = tty_register_driver(hp_simserial_driver);
	if (retval) {
		printk(KERN_ERR "Couldn't register simserial driver\n");
		goto err_free_tty;
	}

	return 0;

err_free_tty:
	put_tty_driver(hp_simserial_driver);
	tty_port_destroy(&state->port);
	return retval;
}
static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma)
{
	int pxm;

	pxm = ma->proximity_domain;
	if (ia64_platform_is("sn2"))
		pxm += ma->reserved1[0] << 8;
	return pxm;
}
bool is_affinity_mask_valid(const struct cpumask *cpumask)
{
	if (ia64_platform_is("sn2")) {
		/* Only allow one CPU to be specified in the smp_affinity mask */
		if (cpumask_weight(cpumask) != 1)
			return false;
	}
	return true;
}
static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
{
	int pxm;

	pxm = pa->proximity_domain_lo;
	if (ia64_platform_is("sn2"))
		pxm += pa->proximity_domain_hi[0] << 8;
	return pxm;
}
static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
{
	int pxm;

	pxm = pa->proximity_domain;
	if (ia64_platform_is("sn2"))
		pxm += pa->reserved[0] << 8;
	return pxm;
}
/*
 * The serial driver boot-time initialization code!
 */
static int __init
simrs_init (void)
{
	int i, rc;
	struct serial_state *state;

	if (!ia64_platform_is("hpsim"))
		return -ENODEV;

	hp_simserial_driver = alloc_tty_driver(1);
	if (!hp_simserial_driver)
		return -ENOMEM;

	show_serial_version();

	/* Initialize the tty_driver structure */
	hp_simserial_driver->owner = THIS_MODULE;
	hp_simserial_driver->driver_name = "simserial";
	hp_simserial_driver->name = "ttyS";
	hp_simserial_driver->major = TTY_MAJOR;
	hp_simserial_driver->minor_start = 64;
	hp_simserial_driver->type = TTY_DRIVER_TYPE_SERIAL;
	hp_simserial_driver->subtype = SERIAL_TYPE_NORMAL;
	hp_simserial_driver->init_termios = tty_std_termios;
	hp_simserial_driver->init_termios.c_cflag =
		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	hp_simserial_driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(hp_simserial_driver, &hp_ops);

	/*
	 * Let's have a little bit of fun!
	 */
	for (i = 0, state = rs_table; i < NR_PORTS; i++, state++) {
		if (state->type == PORT_UNKNOWN)
			continue;

		if (!state->irq) {
			if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0)
				panic("%s: out of interrupt vectors!\n",
				      __FUNCTION__);
			state->irq = rc;
			ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq);
		}

		printk(KERN_INFO "ttyS%d at 0x%04lx (irq = %d) is a %s\n",
		       state->line, state->port, state->irq,
		       uart_config[state->type].name);
	}

	if (tty_register_driver(hp_simserial_driver))
		panic("Couldn't register simserial driver\n");

	return 0;
}
static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
{
	int pxm;

	pxm = ma->proximity_domain;
	if (!ia64_platform_is("sn2"))
		pxm &= 0xff;
	return pxm;
}
void __init
hpsim_setup (char **cmdline_p)
{
	ROOT_DEV = Root_SDA1;		/* default to first SCSI drive */

#ifdef CONFIG_HP_SIMSERIAL_CONSOLE
	{
		extern struct console hpsim_cons;
		if (ia64_platform_is("hpsim"))
			register_console(&hpsim_cons);
	}
#endif
}
static int __init
get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
{
	int pxm;

	pxm = pa->proximity_domain_lo;
	if (srat_rev >= 2) {
		pxm += pa->proximity_domain_hi[0] << 8;
		pxm += pa->proximity_domain_hi[1] << 16;
		pxm += pa->proximity_domain_hi[2] << 24;
	} else if (ia64_platform_is("sn2"))
		pxm += pa->proximity_domain_hi[0] << 8;
	return pxm;
}
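/*
 * Worked example (illustrative values, not from the source): for SRAT
 * revision >= 2 the proximity domain is a little-endian 32-bit value
 * assembled from proximity_domain_lo and proximity_domain_hi[0..2]:
 *
 *	lo = 0x34, hi = { 0x12, 0x00, 0x00 }
 *	pxm = 0x34 + (0x12 << 8) = 0x1234
 *
 * On pre-rev-2 tables only SN2 repurposes hi[0] as bits 8-15; all other
 * platforms are limited to the 8-bit lo value.
 */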
static int __init tiocx_init(void)
{
	cnodeid_t cnodeid;
	int found_tiocx_device = 0;

	if (!ia64_platform_is("sn2"))
		return 0;

	bus_register(&tiocx_bus_type);

	for (cnodeid = 0; cnodeid < num_cnodes; cnodeid++) {
		nasid_t nasid;
		int bt;

		nasid = cnodeid_to_nasid(cnodeid);

		if ((nasid & 0x1) && is_fpga_tio(nasid, &bt)) {
			struct hubdev_info *hubdev;
			struct xwidget_info *widgetp;

			DBG("Found TIO at nasid 0x%x\n", nasid);

			hubdev = (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);

			widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];

			/* The CE hangs off of the CX port but is not an FPGA */
			if (widgetp->xwi_hwid.part_num == TIO_CE_ASIC_PARTNUM)
				continue;

			tio_corelet_reset(nasid, TIOCX_CORELET);
			tio_conveyor_enable(nasid);

			if (cx_device_register(nasid,
					       widgetp->xwi_hwid.part_num,
					       widgetp->xwi_hwid.mfg_num,
					       hubdev, bt) < 0)
				return -ENXIO;
			else
				found_tiocx_device++;
		}
	}

	/* It's ok if we find zero devices. */
	DBG("found_tiocx_device= %d\n", found_tiocx_device);
	return 0;
}
int __init
xp_init(void)
{
	int ret, ch_number;
	u64 func_addr = *(u64 *)xp_nofault_PIOR;
	u64 err_func_addr = *(u64 *)xp_error_PIOR;

	if (!ia64_platform_is("sn2"))
		return -ENODEV;

	/*
	 * Register a nofault code region which performs a cross-partition
	 * PIO read. If the PIO read times out, the MCA handler will consume
	 * the error and return to a kernel-provided instruction to indicate
	 * an error. This PIO read exists because it is guaranteed to timeout
	 * if the destination is down (AMO operations do not timeout on at
	 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
	 * work around).
	 */
	ret = sn_register_nofault_code(func_addr, err_func_addr,
				       err_func_addr, 1, 1);
	if (ret != 0)
		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
		       ret);

	/*
	 * Setup the nofault PIO read target. (There is no special reason why
	 * SH_IPI_ACCESS was selected.)
	 */
	if (is_shub2())
		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
	else
		xp_nofault_PIOR_target = SH1_IPI_ACCESS;

	/* initialize the connection registration mutexes */
	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
		mutex_init(&xpc_registrations[ch_number].mutex);

	return 0;
}
int __init prominfo_init(void)
{
	struct proc_dir_entry **entp;
	struct proc_dir_entry *p;
	cnodeid_t cnodeid;
	nasid_t nasid;
	char name[NODE_NAME_LEN];

	if (!ia64_platform_is("sn2"))
		return 0;

	TRACE();

	DPRINTK("running on cpu %d\n", smp_processor_id());
	DPRINTK("numnodes %d\n", numnodes);

	proc_entries = kmalloc(numnodes * sizeof(struct proc_dir_entry *),
			       GFP_KERNEL);
	if (!proc_entries)
		return -ENOMEM;

	sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);

	for (cnodeid = 0, entp = proc_entries;
	     cnodeid < numnodes;
	     cnodeid++, entp++) {
		sprintf(name, "node%d", cnodeid);
		*entp = proc_mkdir(name, sgi_prominfo_entry);
		nasid = cnodeid_to_nasid(cnodeid);
		p = create_proc_read_entry("fit", 0, *entp, read_fit_entry,
					   lookup_fit(nasid));
		if (p)
			p->owner = THIS_MODULE;
		p = create_proc_read_entry("version", 0, *entp,
					   read_version_entry,
					   lookup_fit(nasid));
		if (p)
			p->owner = THIS_MODULE;
	}

	return 0;
}
/*
 * mspec_init
 *
 * Called at boot time to initialize the mspec facility.
 */
static int __init
mspec_init(void)
{
	int ret;
	int nid;

	/*
	 * The fetchop device only works on SN2 hardware; the uncached and
	 * cached memory drivers should both be valid on all ia64 hardware.
	 */
#ifdef CONFIG_SGI_SN
	if (ia64_platform_is("sn2")) {
		is_sn2 = 1;
		if (is_shub2()) {
			ret = -ENOMEM;
			for_each_online_node(nid) {
				int actual_nid;
				int nasid;
				unsigned long phys;

				scratch_page[nid] = uncached_alloc_page(nid);
				if (scratch_page[nid] == 0)
					goto free_scratch_pages;
				phys = __pa(scratch_page[nid]);
				nasid = get_node_number(phys);
				actual_nid = nasid_to_cnodeid(nasid);
				if (actual_nid != nid)
					goto free_scratch_pages;
			}
		}

		ret = misc_register(&fetchop_miscdev);
		if (ret) {
			printk(KERN_ERR "%s: failed to register device %i\n",
			       FETCHOP_ID, ret);
			goto free_scratch_pages;
		}
	}
/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_exact_node(nid,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}
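/*
 * A minimal sketch (not the driver's actual code) of the allocation side:
 * a consumer such as uncached_alloc_page() would draw from the genalloc
 * pool populated above and call uncached_add_chunk() to grow the pool on
 * demand. The function name and retry loop here are illustrative.
 */
static unsigned long example_uncached_alloc(struct uncached_pool *uc_pool,
					    int nid, int n_pages)
{
	unsigned long uc_addr;

	do {
		/* try to carve n_pages out of the already-converted chunks */
		uc_addr = gen_pool_alloc(uc_pool->pool, n_pages * PAGE_SIZE);
		if (uc_addr != 0)
			return uc_addr;

		/* pool exhausted: convert another granule and retry */
	} while (uncached_add_chunk(uc_pool, nid) == 0);

	return 0;	/* no uncached memory available on this node */
}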
int __init
acpi_boot_init(void)
{
	/*
	 * MADT
	 * ----
	 * Parse the Multiple APIC Description Table (MADT), if it exists.
	 * Note that this table provides platform SMP configuration
	 * information -- the successor to MPS tables.
	 */
	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
		printk(KERN_ERR PREFIX "Can't find MADT\n");
		goto skip_madt;
	}

	/* Local APIC */

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
				  acpi_parse_lapic_addr_ovr, 0) < 0)
		printk(KERN_ERR PREFIX
		       "Error parsing LAPIC address override entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
				  acpi_parse_lsapic, NR_CPUS) < 1)
		printk(KERN_ERR PREFIX
		       "Error parsing MADT - no LAPIC entries\n");

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
				  acpi_parse_lapic_nmi, 0) < 0)
		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");

	/* I/O APIC */

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_IO_SAPIC,
				  acpi_parse_iosapic, NR_IOSAPICS) < 1) {
		if (!ia64_platform_is("sn2"))
			printk(KERN_ERR PREFIX
			       "Error parsing MADT - no IOSAPIC entries\n");
	}

	/* System-Level Interrupt Routing */

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_SOURCE,
				  acpi_parse_plat_int_src,
				  ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
		printk(KERN_ERR PREFIX
		       "Error parsing platform interrupt source entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
				  acpi_parse_int_src_ovr, 0) < 0)
		printk(KERN_ERR PREFIX
		       "Error parsing interrupt source overrides entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
				  acpi_parse_nmi_src, 0) < 0)
		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
skip_madt:

	/*
	 * The FADT says whether a legacy keyboard controller is present.
	 * The FADT also contains an SCI_INT line, by which the system
	 * gets interrupts such as power and sleep buttons. If it's not
	 * on a legacy interrupt, it needs to be set up.
	 */
	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
		printk(KERN_ERR PREFIX "Can't find FADT\n");

#ifdef CONFIG_SMP
	if (available_cpus == 0) {
		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
		smp_boot_data.cpu_phys_id[available_cpus] =
		    hard_smp_processor_id();
		available_cpus = 1;	/* We've got at least one of these, no? */
	}
	smp_boot_data.cpu_count = available_cpus;

	smp_build_cpu_map();
# ifdef CONFIG_ACPI_NUMA
	if (srat_num_cpus == 0) {
		int cpu, i = 1;
		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
			if (smp_boot_data.cpu_phys_id[cpu] !=
			    hard_smp_processor_id())
				node_cpuid[i++].phys_id =
				    smp_boot_data.cpu_phys_id[cpu];
	}
# endif
#endif
#ifdef CONFIG_ACPI_NUMA
	build_cpu_to_node_map();
#endif
	/* Make boot-up look pretty */
	printk(KERN_INFO "%d CPUs available, %d CPUs total\n",
	       available_cpus, total_cpus);

	return 0;
}
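/*
 * A note on the mixed "< 0" and "< 1" checks above (my reading of the
 * acpi_table_parse_madt() contract, stated here as an assumption): the
 * call returns the number of matching MADT entries handled, or a negative
 * value on error. So "< 0" flags only parse failures, while "< 1" also
 * flags the required-entry case where the table parsed cleanly but
 * contained no entries of that type (e.g. no LAPIC or IOSAPIC entries).
 */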
int __init acpi_boot_init(void)
{
	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
		printk(KERN_ERR PREFIX "Can't find MADT\n");
		goto skip_madt;
	}

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
				  acpi_parse_lapic_addr_ovr, 0) < 0)
		printk(KERN_ERR PREFIX
		       "Error parsing LAPIC address override entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
				  acpi_parse_lapic_nmi, 0) < 0)
		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_IO_SAPIC,
				  acpi_parse_iosapic, NR_IOSAPICS) < 1) {
		if (!ia64_platform_is("sn2"))
			printk(KERN_ERR PREFIX
			       "Error parsing MADT - no IOSAPIC entries\n");
	}

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_SOURCE,
				  acpi_parse_plat_int_src,
				  ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
		printk(KERN_ERR PREFIX
		       "Error parsing platform interrupt source entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
				  acpi_parse_int_src_ovr, 0) < 0)
		printk(KERN_ERR PREFIX
		       "Error parsing interrupt source overrides entry\n");

	if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
				  acpi_parse_nmi_src, 0) < 0)
		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
skip_madt:

	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
		printk(KERN_ERR PREFIX "Can't find FADT\n");

#ifdef CONFIG_ACPI_NUMA
#ifdef CONFIG_SMP
	if (srat_num_cpus == 0) {
		int cpu, i = 1;
		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
			if (smp_boot_data.cpu_phys_id[cpu] !=
			    hard_smp_processor_id())
				node_cpuid[i++].phys_id =
				    smp_boot_data.cpu_phys_id[cpu];
	}
#endif
	build_cpu_to_node_map();
#endif
	return 0;
}
static int __init sn_salinfo_init(void)
{
	if (ia64_platform_is("sn2"))
		salinfo_platform_oemdata = &sn_salinfo_platform_oemdata;
	return 0;
}
/*
 * scdrv_init
 *
 * Called at boot time to initialize the system controller communication
 * facility.
 */
int __init
scdrv_init(void)
{
	geoid_t geoid;
	cnodeid_t cnode;
	char devname[32];
	char *devnamep;
	struct sysctl_data_s *scd;
	void *salbuf;
	dev_t first_dev, dev;
	nasid_t event_nasid;

	if (!ia64_platform_is("sn2"))
		return -ENODEV;

	event_nasid = ia64_sn_get_console_nasid();

	if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
				SYSCTL_BASENAME) < 0) {
		printk("%s: failed to register SN system controller device\n",
		       __func__);
		return -ENODEV;
	}
	snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME);

	for (cnode = 0; cnode < num_cnodes; cnode++) {
		geoid = cnodeid_get_geoid(cnode);
		devnamep = devname;
		format_module_id(devnamep, geo_module(geoid),
				 MODULE_FORMAT_BRIEF);
		devnamep = devname + strlen(devname);
		sprintf(devnamep, "^%d#%d", geo_slot(geoid),
			geo_slab(geoid));

		/* allocate sysctl device data */
		scd = kzalloc(sizeof(struct sysctl_data_s), GFP_KERNEL);
		if (!scd) {
			printk("%s: failed to allocate device info"
			       " for %s/%s\n", __func__,
			       SYSCTL_BASENAME, devname);
			continue;
		}

		/* initialize sysctl device data fields */
		scd->scd_nasid = cnodeid_to_nasid(cnode);
		if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
			printk("%s: failed to allocate driver buffer"
			       " (%s/%s)\n", __func__,
			       SYSCTL_BASENAME, devname);
			kfree(scd);
			continue;
		}

		if (ia64_sn_irtr_init(scd->scd_nasid, salbuf,
				      SCDRV_BUFSZ) < 0) {
			printk("%s: failed to initialize SAL for"
			       " system controller communication"
			       " (%s/%s): outdated PROM?\n",
			       __func__, SYSCTL_BASENAME, devname);
			kfree(scd);
			kfree(salbuf);
			continue;
		}

		dev = first_dev + cnode;
		cdev_init(&scd->scd_cdev, &scdrv_fops);
		if (cdev_add(&scd->scd_cdev, dev, 1)) {
			printk("%s: failed to register system"
			       " controller device (%s/%s)\n",
			       __func__, SYSCTL_BASENAME, devname);
			kfree(scd);
			kfree(salbuf);
			continue;
		}

		device_create(snsc_class, NULL, dev, NULL, "%s", devname);

		ia64_sn_irtr_intr_enable(scd->scd_nasid, 0 /*ignored */,
					 SAL_IROUTER_INTR_RECV);

		/*
		 * On the console nasid, prepare to receive
		 * system controller environmental events.
		 */
		if (scd->scd_nasid == event_nasid)
			scdrv_event_init(scd);
	}
	return 0;
}
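/*
 * For orientation (a generic sketch, not part of this driver): scdrv_init
 * follows the standard pattern for a range of char devices -- reserve a
 * dev_t region, wire a cdev to a file_operations table, add it, and clean
 * up on failure. Stripped to its skeleton, with hypothetical names:
 */
#include <linux/cdev.h>
#include <linux/fs.h>

static struct cdev example_cdev;
static const struct file_operations example_fops;	/* .open/.read/... */

static int __init example_chrdev_init(void)
{
	dev_t first_dev;
	int ret;

	/* reserve one minor starting at 0 under the name "example" */
	ret = alloc_chrdev_region(&first_dev, 0, 1, "example");
	if (ret < 0)
		return ret;

	cdev_init(&example_cdev, &example_fops);
	ret = cdev_add(&example_cdev, first_dev, 1);
	if (ret) {
		unregister_chrdev_region(first_dev, 1);
		return ret;
	}
	return 0;
}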
int __init
xpc_init(void)
{
	int ret;
	short partid;
	struct xpc_partition *part;
	struct task_struct *kthread;
	size_t buf_size;

	if (!ia64_platform_is("sn2"))
		return -ENODEV;

	buf_size = max(XPC_RP_VARS_SIZE,
		       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
	xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
				     GFP_KERNEL, &xpc_remote_copy_buffer_base);
	if (xpc_remote_copy_buffer == NULL)
		return -ENOMEM;

	snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
	snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS BEEN ACTIVATED.
	 */
	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->act_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		init_timer(&part->disengage_request_timer);
		part->disengage_request_timer.function =
		    xpc_timeout_partition_disengage_request;
		part->disengage_request_timer.data = (unsigned long)part;

		part->setup_state = XPC_P_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	/*
	 * Open up protections for IPI operations (and AMO operations on
	 * Shub 1.1 systems).
	 */
	xpc_allow_IPI_ops();

	/*
	 * Interrupts being processed will increment this atomic variable and
	 * awaken the heartbeat thread which will process the interrupts.
	 */
	atomic_set(&xpc_act_IRQ_rcvd, 0);

	/*
	 * This is safe to do before the xpc_hb_checker thread has started
	 * because the handler releases a wait queue. If an interrupt is
	 * received before the thread is waiting, it will not go to sleep,
	 * but rather immediately process the interrupt.
	 */
	ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
			  "xpc hb", NULL);
	if (ret != 0) {
		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
			"errno=%d\n", -ret);

		xpc_restrict_IPI_ops();

		if (xpc_sysctl)
			unregister_sysctl_table(xpc_sysctl);

		kfree(xpc_remote_copy_buffer_base);
		return -EBUSY;
	}

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	xpc_rsvd_page = xpc_rsvd_page_init();
	if (xpc_rsvd_page == NULL) {
		dev_err(xpc_part, "could not setup our reserved page\n");

		free_irq(SGI_XPC_ACTIVATE, NULL);
		xpc_restrict_IPI_ops();

		if (xpc_sysctl)
			unregister_sysctl_table(xpc_sysctl);

		kfree(xpc_remote_copy_buffer_base);
		return -EBUSY;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	init_timer(&xpc_hb_timer);
	xpc_hb_timer.function = xpc_hb_beater;

	/*
	 * The real work-horse behind xpc. This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");

		/* indicate to others that our reserved page is uninitialized */
		xpc_rsvd_page->vars_pa = 0;

		/* take ourselves off of the reboot_notifier_list */
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);

		/* take ourselves off of the die_notifier list */
		(void)unregister_die_notifier(&xpc_die_notifier);

		del_timer_sync(&xpc_hb_timer);
		free_irq(SGI_XPC_ACTIVATE, NULL);
		xpc_restrict_IPI_ops();

		if (xpc_sysctl)
			unregister_sysctl_table(xpc_sysctl);

		kfree(xpc_remote_copy_buffer_base);
		return -EBUSY;
	}

	/*
	 * Startup a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL. This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_allocate, xpc_initiate_send,
			  xpc_initiate_send_notify, xpc_initiate_received,
			  xpc_initiate_partid_to_nasids);

	return 0;
}