/*
 * Install a xenstore watch on the "cpu" subtree once the xenstore
 * connection comes up, so toolstack vcpu hotplug requests reach
 * handle_vcpu_hotplug_event().
 */
static int setup_cpu_watcher(struct notifier_block *notifier,
			     unsigned long event, void *data)
{
	static struct xenbus_watch cpu_watch = {
		.node		= "cpu",
		.callback	= handle_vcpu_hotplug_event,
	};

	/* Failure here is non-fatal: hotplug simply never triggers. */
	(void)register_xenbus_watch(&cpu_watch);

	return NOTIFY_DONE;
}

static int __init setup_vcpu_hotplug_event(void)
{
	static struct notifier_block xsn_cpu = {
		.notifier_call = setup_cpu_watcher,
	};

	/* vcpu hotplug over xenstore only applies to PV guests. */
	if (!xen_pv_domain())
		return -ENODEV;

	register_xenstore_notifier(&xsn_cpu);

	return 0;
}

arch_initcall(setup_vcpu_hotplug_event);
static int setup_cpu_watcher(struct notifier_block *notifier, unsigned long event, void *data) { int cpu; static struct xenbus_watch cpu_watch = { .node = "cpu", .callback = handle_vcpu_hotplug_event}; (void)register_xenbus_watch(&cpu_watch); for_each_possible_cpu(cpu) { if (vcpu_online(cpu) == 0) { (void)cpu_down(cpu); set_cpu_present(cpu, false); } } return NOTIFY_DONE; } static int __init setup_vcpu_hotplug_event(void) { static struct notifier_block xsn_cpu = { .notifier_call = setup_cpu_watcher }; if (!xen_pv_domain()) return -ENODEV; register_xenstore_notifier(&xsn_cpu); return 0; } arch_initcall(setup_vcpu_hotplug_event);
/*
 * Xenstore-notifier callback: register the "cpu" watch and, for
 * non-privileged domains, sync every possible vcpu with its current
 * xenstore availability state.  Legacy __cpuinit API.
 */
static int __cpuinit setup_cpu_watcher(struct notifier_block *notifier,
				       unsigned long event, void *data)
{
	unsigned int i;
	/* XBWF_new_thread: run the watch callback in its own thread. */
	static struct xenbus_watch __cpuinitdata cpu_watch = {
		.node		= "cpu",
		.callback	= handle_vcpu_hotplug_event,
		.flags		= XBWF_new_thread
	};

	/* Registration failure is tolerated; hotplug just stays inert. */
	(void)register_xenbus_watch(&cpu_watch);

	if (!is_initial_xendomain()) {
		/* Apply the toolstack's current availability for each vcpu. */
		for_each_possible_cpu(i)
			vcpu_hotplug(i);
		printk(KERN_INFO "Brought up %ld CPUs\n",
		       (long)num_online_cpus());
	}

	return NOTIFY_DONE;
}

static int __init setup_vcpu_hotplug_event(void)
{
	static struct notifier_block hotplug_cpu = {
		.notifier_call = smpboot_cpu_notify
	};
	static struct notifier_block __cpuinitdata xsn_cpu = {
		.notifier_call = setup_cpu_watcher
	};

	if (!is_running_on_xen())
		return -ENODEV;

	/* CPU notifier first, then the xenstore notifier that arms the watch. */
	register_cpu_notifier(&hotplug_cpu);
	register_xenstore_notifier(&xsn_cpu);

	return 0;
}

arch_initcall(setup_vcpu_hotplug_event);

/*
 * Take all CPUs except the boot CPU down for suspend.  On failure,
 * re-sync every vcpu from xenstore (which brings survivors back up)
 * and propagate the error.
 */
int __ref smp_suspend(void)
{
	unsigned int cpu;
	int err;

	for_each_online_cpu(cpu) {
		if (cpu == 0)
			continue;
		err = cpu_down(cpu);
		if (err) {
			printk(KERN_CRIT "Failed to take all CPUs "
			       "down: %d.\n", err);
			/* Roll back: restore vcpus to their xenstore state. */
			for_each_possible_cpu(cpu)
				vcpu_hotplug(cpu);
			return err;
		}
	}
	return 0;
}
/* SPIDEV driver registration */
/* Register a generic spidev user-space device on SPI bus 0, CS 0. */
static int __init lpc313x_spidev_register(void)
{
	struct spi_board_info info = {
		.modalias	= "spidev",
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 0,
	};

	return spi_register_board_info(&info, 1);
}
arch_initcall(lpc313x_spidev_register);
#endif
#endif

/* Platform devices registered for this board at init time. */
static struct platform_device *devices[] __initdata = {
	&lpc313x_mci_device,
#if defined(CONFIG_SPI_LPC313X)
	&lpc313x_spi_device,
#endif
};

/*
 * Static I/O mappings for the VAL3154 board.
 * NOTE(review): this table continues past the end of this chunk.
 */
static struct map_desc val3154_io_desc[] __initdata = {
	{
		.virtual	= io_p2v(EXT_SRAM0_PHYS),
		.pfn		= __phys_to_pfn(EXT_SRAM0_PHYS),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	},
	{
		.virtual	= io_p2v(EXT_SRAM1_PHYS + 0x10000),
		.pfn		= __phys_to_pfn(EXT_SRAM1_PHYS + 0x10000),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	},
	{
		.virtual	= io_p2v(IO_SDMMC_PHYS),
/*
 * Watchdog ioctl handler implementing the standard WDIOC_* interface
 * for the GPIO watchdog misc device.
 */
static long gpio_wdt_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = (int __user *)argp;
	unsigned int value;
	static const struct watchdog_info ident = {
		.options	= WDIOF_CARDRESET,
		.identity	= "GPIO WDT",
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		if (copy_to_user(argp, &ident, sizeof(ident)))
			return -EFAULT;
		break;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		/* Always reports 0; NOTE(review): put_user() result ignored. */
		put_user(0, p);
		break;
	case WDIOC_SETOPTIONS:
		if (get_user(value, p))
			return -EFAULT;
		if (value & WDIOS_ENABLECARD)
			gpio_wdt_start();
		else if (value & WDIOS_DISABLECARD)
			gpio_wdt_stop();
		else
			return -EINVAL;
		return 0;
	case WDIOC_KEEPALIVE:
		gpio_wdt_reset();
		break;
	default:
		return -ENOTTY;
	}
	return 0;
}

/*
 * Any write pings the watchdog.  NOTE(review): returns -EIO for a
 * zero-length write (most watchdog drivers return 0), and `buf` lacks
 * the __user annotation — confirm against callers/sparse.
 */
static ssize_t gpio_wdt_write(struct file *file, const char *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return -EIO;
	gpio_wdt_reset();
	return count;
}

static const struct file_operations gpio_wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.unlocked_ioctl	= gpio_wdt_ioctl,
	.open		= gpio_wdt_open,
	.write		= gpio_wdt_write,
	.release	= gpio_wdt_release,
};

/* Standard /dev/watchdog misc device. */
static struct miscdevice gpio_wdt_misc = {
	.minor	= WATCHDOG_MINOR,
	.name	= "watchdog",
	.fops	= &gpio_wdt_fops,
};

/*
 * Probe: copy platform data into the singleton gpio_wdt_device, claim
 * the GPIO and arm the ping timer.  NOTE(review): platform_data is
 * dereferenced without a NULL check — confirm every registrant
 * supplies it.  (Function continues past the end of this chunk.)
 */
static int gpio_wdt_probe(struct platform_device *pdev)
{
	int ret;
	struct gpio_wdt_platform_data *gpio_wdt_data = pdev->dev.platform_data;

	gpio_wdt_device.gpio = gpio_wdt_data->gpio;
	gpio_wdt_device.interval = gpio_wdt_data->interval;
	gpio_wdt_device.first_interval = gpio_wdt_data->first_interval;
	/* Fall back to the steady-state interval for the first ping. */
	if (gpio_wdt_device.first_interval <= 0) {
		gpio_wdt_device.first_interval = gpio_wdt_device.interval;
	}

	ret = gpio_request(gpio_wdt_device.gpio, "gpio-wdt");
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request gpio");
		return ret;
	}

	spin_lock_init(&gpio_wdt_device.lock);
	init_completion(&gpio_wdt_device.stop);
	gpio_wdt_device.queue = 0;
	clear_bit(0, &gpio_wdt_device.inuse);
	setup_timer(&gpio_wdt_device.timer, gpio_wdt_trigger, 0L);
	gpio_wdt_device.default_ticks = ticks;
	/* (Tail of gpio_wdt_probe — the function begins before this chunk.) */
	gpio_wdt_start();
	dev_info(&pdev->dev,
		 "GPIO Hardware Watchdog driver (gpio=%i interval=%i/%i)\n",
		 gpio_wdt_data->gpio, gpio_wdt_data->first_interval,
		 gpio_wdt_data->interval);
	return 0;
}

/*
 * Remove: wait for a queued ping cycle to drain, then release the GPIO
 * and unregister the misc device.
 * NOTE(review): gpio_free() runs before misc_deregister(), so the
 * still-registered device could briefly operate on a freed GPIO —
 * confirm whether the order should be swapped.
 */
static int gpio_wdt_remove(struct platform_device *pdev)
{
	/* FIXME: do we need to lock this test ? */
	if (gpio_wdt_device.queue) {
		gpio_wdt_device.queue = 0;
		wait_for_completion(&gpio_wdt_device.stop);
	}

	gpio_free(gpio_wdt_device.gpio);
	misc_deregister(&gpio_wdt_misc);
	return 0;
}

static struct platform_driver gpio_wdt_driver = {
	.probe		= gpio_wdt_probe,
	.remove		= gpio_wdt_remove,
	.driver.name	= "gpio-wdt",
	.driver.owner	= THIS_MODULE,
};

static int __init gpio_wdt_init(void)
{
	return platform_driver_register(&gpio_wdt_driver);
}
arch_initcall(gpio_wdt_init);

/*
 * We do wdt initialization in two steps: arch_initcall probes the wdt
 * very early to start pinging the watchdog (misc devices are not yet
 * available), and later module_init() just registers the misc device.
 */
static int gpio_wdt_init_late(void)
{
	int ret;

	ret = misc_register(&gpio_wdt_misc);
	if (ret < 0) {
		pr_err("GPIO_WDT: failed to register misc device\n");
		return ret;
	}
	return 0;
}
#ifndef MODULE
module_init(gpio_wdt_init_late);
#endif

static void __exit gpio_wdt_exit(void)
{
	platform_driver_unregister(&gpio_wdt_driver);
}
/* * Initialization. Try all known PCI access methods. Note that we support * using both PCI BIOS and direct access: in such cases, we use I/O ports * to access config space, but we still keep BIOS order of cards to be * compatible with 2.0.X. This should go away some day. */ static int __init pcibios_init(void) { ioport_resource.start = 0xA0000000; ioport_resource.end = 0xDFFFFFFF; iomem_resource.start = 0xA0000000; iomem_resource.end = 0xDFFFFFFF; if (!pci_probe) return 0; if (pci_check_direct() < 0) { printk(KERN_WARNING "PCI: No PCI bus detected\n"); return 0; } printk(KERN_INFO "PCI: Probing PCI hardware [mempage %08x]\n", MEM_PAGING_REG); { #if 0 static struct pci_bus am33_root_bus = { .children = LIST_HEAD_INIT(am33_root_bus.children), .devices = LIST_HEAD_INIT(am33_root_bus.devices), .number = 0, .secondary = 0, .resource = { &ioport_resource, &iomem_resource }, }; am33_root_bus.ops = pci_root_ops; list_add_tail(&am33_root_bus.node, &pci_root_buses); am33_root_bus.subordinate = pci_do_scan_bus(0); pci_root_bus = &am33_root_bus; #else pci_root_bus = pci_scan_bus(0, &pci_direct_ampci, NULL); #endif } pcibios_irq_init(); pcibios_fixup_irqs(); #if 0 pcibios_resource_survey(); #endif return 0; } arch_initcall(pcibios_init); char *__init pcibios_setup(char *str) { if (!strcmp(str, "off")) { pci_probe = 0; return NULL; } else if (!strncmp(str, "lastbus=", 8)) { pcibios_last_bus = simple_strtol(str+8, NULL, 0); return NULL; } return str; } int pcibios_enable_device(struct pci_dev *dev, int mask) { int err; err = pcibios_enable_resources(dev, mask); if (err == 0) pcibios_enable_irq(dev); return err; }
/* SPIDEV driver registration */
/* Register a generic spidev user-space device on SPI bus 0, CS 0. */
static int __init lpc313x_spidev_register(void)
{
	struct spi_board_info info = {
		.modalias	= "spidev",
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 0,
	};

	return spi_register_board_info(&info, 1);
}
arch_initcall(lpc313x_spidev_register);
#endif
#endif

/* MMC controller callbacks and configuration for the VAL3153 board. */
static struct lpc313x_mci_board val3153_mci_platform_data = {
	.num_slots		= 2,
	.detect_delay_ms	= 250,
	.init			= mci_init,
	.get_ro			= mci_get_ro,
	.get_cd			= mci_get_cd,
	.get_ocr		= mci_get_ocr,
	.get_bus_wd		= mci_get_bus_wd,
	.setpower		= mci_setpower,
	.select_slot		= mci_select_slot,
	.exit			= mci_exit,
};

/* 32-bit DMA mask for the MMC controller. */
static u64 mci_dmamask = 0xffffffffUL;

static struct platform_device lpc313x_mci_device = {
	.name		= "lpc313x_mmc",
	.num_resources	= ARRAY_SIZE(lpc313x_mci_resources),
	.dev		= {
		.dma_mask		= &mci_dmamask,
		.coherent_dma_mask	= 0xffffffff,
		.platform_data		= &val3153_mci_platform_data,
	},
	.resource	= lpc313x_mci_resources,
};

/* Platform devices registered for this board at init time. */
static struct platform_device *devices[] __initdata = {
	&cs89x0_device,
	&lpc313x_mci_device,
#if defined (CONFIG_MTD_NAND_LPC313X)
	&lpc313x_nand_device,
#endif
#if defined(CONFIG_SPI_LPC313X)
	&lpc313x_spi_device,
#endif
};

/*
 * Static I/O mappings for the VAL3153 board.
 * NOTE(review): this table continues past the end of this chunk.
 */
static struct map_desc val3153_io_desc[] __initdata = {
	{
		.virtual	= io_p2v(EXT_SRAM0_PHYS),
		.pfn		= __phys_to_pfn(EXT_SRAM0_PHYS),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	},
	{
		.virtual	= io_p2v(EXT_SRAM1_PHYS + 0x10000),
		.pfn		= __phys_to_pfn(EXT_SRAM1_PHYS + 0x10000),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	},
	{
		.virtual	= io_p2v(IO_SDMMC_PHYS),
/* SPIDEV driver registration */
/* Register a generic spidev user-space device on SPI bus 0, CS 0. */
static int __init lpc313x_spidev_register(void)
{
	struct spi_board_info info = {
		.modalias	= "spidev",
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 0,
	};

	return spi_register_board_info(&info, 1);
}
arch_initcall(lpc313x_spidev_register);
#endif

#if defined(CONFIG_MTD_DATAFLASH)
/* MTD Data FLASH driver registration */
/* Register an Atmel DataFlash chip on SPI bus 0, CS 0, at 30 MHz. */
static int __init lpc313x_spimtd_register(void)
{
	struct spi_board_info info = {
		.modalias	= "mtd_dataflash",
		.max_speed_hz	= 30000000,
		.bus_num	= 0,
		.chip_select	= 0,
	};

	return spi_register_board_info(&info, 1);
}
arch_initcall(lpc313x_spimtd_register);
#endif
#endif

/* Platform devices registered for this board at init time. */
static struct platform_device *devices[] __initdata = {
	&lpc313x_mci_device,
#if defined (CONFIG_MTD_NAND_LPC313X)
	&lpc313x_nand_device,
#endif
#if defined(CONFIG_SPI_LPC313X)
	&lpc313x_spi_device,
#endif
};

/*
 * Static I/O mappings for the EA313x board.
 * NOTE(review): this table continues past the end of this chunk.
 */
static struct map_desc ea313x_io_desc[] __initdata = {
	{
		.virtual	= io_p2v(EXT_SRAM0_PHYS),
		.pfn		= __phys_to_pfn(EXT_SRAM0_PHYS),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	},
	{
		.virtual	= io_p2v(EXT_SRAM1_PHYS + 0x10000),
		.pfn		= __phys_to_pfn(EXT_SRAM1_PHYS + 0x10000),
		.length		= SZ_4K,
		.type		= MT_DEVICE
	},
	{
		.virtual	= io_p2v(IO_SDMMC_PHYS),
		.pfn		= __phys_to_pfn(IO_SDMMC_PHYS),
		.length		= IO_SDMMC_SIZE,
		.type		= MT_DEVICE
	},
	{
		.virtual	= io_p2v(IO_USB_PHYS),
/*
 * Tell Xen about a newly added PCI device so the hypervisor can manage
 * passthrough and MSI/MSI-X for it.  Tries the segment-aware
 * PHYSDEVOP_pci_device_add first; if the hypervisor reports -ENOSYS,
 * latches pci_seg_supported to false and falls back to the legacy
 * segment-0-only manage_pci hypercalls.
 */
static int xen_add_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);
#ifdef CONFIG_PCI_IOV
	struct pci_dev *physfn = pci_dev->physfn;
#endif

	if (pci_seg_supported) {
		struct physdev_pci_device_add add = {
			.seg = pci_domain_nr(pci_dev->bus),
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};
#ifdef CONFIG_ACPI
		acpi_handle handle;
#endif

#ifdef CONFIG_PCI_IOV
		if (pci_dev->is_virtfn) {
			/* SR-IOV virtual function: also report its physfn. */
			add.flags = XEN_PCI_DEV_VIRTFN;
			add.physfn.bus = physfn->bus->number;
			add.physfn.devfn = physfn->devfn;
		} else
#endif
		/* ARI with a non-zero slot means an extended-function devfn. */
		if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
			add.flags = XEN_PCI_DEV_EXTFN;

#ifdef CONFIG_ACPI
		/*
		 * Walk up from the device (or its bridge / physfn's bridge)
		 * looking for an ACPI _PXM proximity domain to pass to Xen.
		 */
		handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
		if (!handle)
			handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
#ifdef CONFIG_PCI_IOV
		if (!handle && pci_dev->is_virtfn)
			handle = DEVICE_ACPI_HANDLE(physfn->bus->bridge);
#endif
		if (handle) {
			acpi_status status;

			do {
				unsigned long long pxm;

				status = acpi_evaluate_integer(handle, "_PXM",
							       NULL, &pxm);
				if (ACPI_SUCCESS(status)) {
					add.optarr[0] = pxm;
					add.flags |= XEN_PCI_DEV_PXM;
					break;
				}
				status = acpi_get_parent(handle, &handle);
			} while (ACPI_SUCCESS(status));
		}
#endif /* CONFIG_ACPI */

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add);
		if (r != -ENOSYS)
			return r;
		/* Hypervisor lacks segment support; use legacy calls below. */
		pci_seg_supported = false;
	}

	/* Legacy path handles PCI segment 0 only. */
	if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
#ifdef CONFIG_PCI_IOV
	else if (pci_dev->is_virtfn) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_virtfn = 1,
			.physfn.bus = physfn->bus->number,
			.physfn.devfn = physfn->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	}
#endif
	else if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) {
		struct physdev_manage_pci_ext manage_pci_ext = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
			.is_extfn = 1,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
					  &manage_pci_ext);
	} else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn,
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove == 0 ?
					  PHYSDEVOP_manage_pci_add :
					  PHYSDEVOP_manage_pci_add, &manage_pci);
	}

	return r;
}

/*
 * Tell Xen a PCI device is going away, mirroring xen_add_device():
 * segment-aware call when supported, legacy segment-0 call otherwise.
 */
static int xen_remove_device(struct device *dev)
{
	int r;
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (pci_seg_supported) {
		struct physdev_pci_device device = {
			.seg = pci_domain_nr(pci_dev->bus),
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_remove,
					  &device);
	} else if (pci_domain_nr(pci_dev->bus))
		r = -ENOSYS;
	else {
		struct physdev_manage_pci manage_pci = {
			.bus = pci_dev->bus->number,
			.devfn = pci_dev->devfn
		};

		r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
					  &manage_pci);
	}

	return r;
}

/*
 * PCI bus notifier: forward device add/remove events to Xen.  Errors
 * are logged but not fatal — passthrough or MSI may later fail.
 */
static int xen_pci_notifier(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct device *dev = data;
	int r = 0;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		r = xen_add_device(dev);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		r = xen_remove_device(dev);
		break;
	default:
		return NOTIFY_DONE;
	}
	if (r)
		dev_err(dev,
			"Failed to %s - passthrough or MSI/MSI-X might fail!\n",
			action == BUS_NOTIFY_ADD_DEVICE ? "add" :
			(action == BUS_NOTIFY_DEL_DEVICE ? "delete" : "?"));

	return NOTIFY_OK;
}

static struct notifier_block device_nb = {
	.notifier_call = xen_pci_notifier,
};

/* Only dom0 manages physical PCI devices on behalf of Xen. */
static int __init register_xen_pci_notifier(void)
{
	if (!xen_initial_domain())
		return 0;

	return bus_register_notifier(&pci_bus_type, &device_nb);
}

arch_initcall(register_xen_pci_notifier);