static void xenbus_dev_shutdown(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	unsigned long timeout = 5*HZ;

	DPRINTK("%s", dev->nodename);

/* Commented out since xenstored stubdom is now minios based not linux based
#define XENSTORE_DOMAIN_SHARES_THIS_KERNEL
*/
#ifndef XENSTORE_DOMAIN_SHARES_THIS_KERNEL
	if (is_initial_xendomain())
#endif
		return;

	get_device(&dev->dev);
	if (dev->state != XenbusStateConnected) {
		printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__,
		       dev->nodename, xenbus_strstate(dev->state));
		goto out;
	}
	xenbus_switch_state(dev, XenbusStateClosing);
	timeout = wait_for_completion_timeout(&dev->down, timeout);
	if (!timeout)
		printk("%s: %s timeout closing device\n", __FUNCTION__,
		       dev->nodename);
 out:
	put_device(&dev->dev);
}
static unsigned int new_space(u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		printk(KERN_ERR "PCI: Too many IO port spaces "
		       "(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

#ifdef CONFIG_XEN
	if (is_initial_xendomain())
		HYPERVISOR_add_io_space(phys_base, sparse, i);
#endif

	return i;
}
int xencons_ring_init(void)
{
	int irq;

	if (xencons_irq)
		unbind_from_irqhandler(xencons_irq, NULL);
	xencons_irq = 0;

	if (!is_running_on_xen() ||
	    is_initial_xendomain() ||
	    !xen_start_info->console.domU.evtchn)
		return -ENODEV;

	irq = bind_caller_port_to_irqhandler(
		xen_start_info->console.domU.evtchn,
		handle_input, 0, "xencons", NULL);
	if (irq < 0) {
		printk(KERN_ERR "XEN console request irq failed %i\n", irq);
		return irq;
	}
	xencons_irq = irq;

	/* In case we have in-flight data after save/restore... */
	notify_daemon();

	return 0;
}
/*
 * Returns the effective MTRR type for the region
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i, error;
	u64 start_mfn, end_mfn, base_mfn, top_mfn;
	u8 prev_match, curr_match;
	struct xen_platform_op op;

	if (!is_initial_xendomain())
		return MTRR_TYPE_WRBACK;

	if (!num_var_ranges)
		return 0xFF;

	start_mfn = start >> PAGE_SHIFT;
	/* Make end inclusive, instead of exclusive */
	end_mfn = --end >> PAGE_SHIFT;

	/* Look in fixed ranges. Just return the type as per start */
	if (start_mfn < 0x100) {
#if 0 /* todo */
		op.cmd = XENPF_read_memtype;
		op.u.read_memtype.reg = ???;
		error = HYPERVISOR_platform_op(&op);
		if (!error)
			return op.u.read_memtype.type;
#endif
		return MTRR_TYPE_UNCACHABLE;
	}
static int __cpuinit setup_cpu_watcher(struct notifier_block *notifier,
				       unsigned long event, void *data)
{
	unsigned int i;

	static struct xenbus_watch __cpuinitdata cpu_watch = {
		.node = "cpu",
		.callback = handle_vcpu_hotplug_event,
		.flags = XBWF_new_thread
	};
	(void)register_xenbus_watch(&cpu_watch);

	if (!is_initial_xendomain()) {
		for_each_possible_cpu(i)
			vcpu_hotplug(i);
		printk(KERN_INFO "Brought up %ld CPUs\n",
		       (long)num_online_cpus());
	}

	return NOTIFY_DONE;
}

static int __init setup_vcpu_hotplug_event(void)
{
	static struct notifier_block hotplug_cpu = {
		.notifier_call = smpboot_cpu_notify
	};
	static struct notifier_block __cpuinitdata xsn_cpu = {
		.notifier_call = setup_cpu_watcher
	};

	if (!is_running_on_xen())
		return -ENODEV;

	register_cpu_notifier(&hotplug_cpu);
	register_xenstore_notifier(&xsn_cpu);

	return 0;
}

arch_initcall(setup_vcpu_hotplug_event);

int __ref smp_suspend(void)
{
	unsigned int cpu;
	int err;

	for_each_online_cpu(cpu) {
		if (cpu == 0)
			continue;
		err = cpu_down(cpu);
		if (err) {
			printk(KERN_CRIT "Failed to take all CPUs "
			       "down: %d.\n", err);
			for_each_possible_cpu(cpu)
				vcpu_hotplug(cpu);
			return err;
		}
	}

	return 0;
}
static int __init xenkbd_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

	/* Nothing to do if running in dom0. */
	if (is_initial_xendomain())
		return -ENODEV;

	return xenbus_register_frontend(&xenkbd_driver);
}
static void __init i386_default_early_setup(void)
{
	/* Initialize 32bit specific setup functions */
	if (is_initial_xendomain())
		x86_init.resources.probe_roms = probe_roms;
	x86_init.resources.reserve_resources = i386_reserve_resources;
#ifndef CONFIG_XEN
	x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;

	reserve_ebda_region();
#endif
}
static void xen_get_rtc_time(struct rtc_time *tm)
{
	if (is_initial_xendomain()) {
		host_md_get_rtc_time(tm);
		return;
	} else {
		ulong t;

		t = time_from_shared();
		to_tm(t, tm);
	}
}
static __init int add_pcspkr(void)
{
	struct platform_device *pd;

#ifdef CONFIG_XEN
	if (!is_initial_xendomain())
		return 0;
#endif

	pd = platform_device_register_simple("pcspkr", -1, NULL, 0);

	return IS_ERR(pd) ? PTR_ERR(pd) : 0;
}
static void __xencons_tx_flush(void)
{
	int sent, sz, work_done = 0;

	if (x_char) {
		if (is_initial_xendomain())
			kcons_write_dom0(NULL, &x_char, 1);
		else
			while (x_char)
				if (xencons_ring_send(&x_char, 1) == 1)
					break;
		x_char = 0;
		work_done = 1;
	}

	while (wc != wp) {
		sz = wp - wc;
		if (sz > (wbuf_size - WBUF_MASK(wc)))
			sz = wbuf_size - WBUF_MASK(wc);
		if (is_initial_xendomain()) {
			kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
			wc += sz;
		} else {
			sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
			if (sent == 0)
				break;
			wc += sent;
		}
		work_done = 1;
	}

	if (work_done && (xencons_tty != NULL)) {
		wake_up_interruptible(&xencons_tty->write_wait);
		if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
		    (xencons_tty->ldisc.write_wakeup != NULL))
			(xencons_tty->ldisc.write_wakeup)(xencons_tty);
	}
}
int xenbus_conn(domid_t remote_dom, unsigned long *grant_ref,
		evtchn_port_t *local_port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int rc, rc2;

	BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT);
	BUG_ON(!is_initial_xendomain());

#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
	remove_xen_proc_entry("xsd_kva");
	remove_xen_proc_entry("xsd_port");
#endif

	rc = xb_free_port(xen_store_evtchn);
	if (rc != 0)
		goto fail0;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = remote_dom;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					 &alloc_unbound);
	if (rc != 0)
		goto fail0;
	*local_port = xen_store_evtchn = alloc_unbound.port;

	/* keep the old page (xen_store_mfn, xen_store_interface) */
	rc = gnttab_grant_foreign_access(remote_dom, xen_store_mfn,
					 GTF_permit_access);
	if (rc < 0)
		goto fail1;
	*grant_ref = rc;

	rc = xb_init_comms();
	if (rc != 0)
		goto fail1;

	return 0;

fail1:
	rc2 = xb_free_port(xen_store_evtchn);
	if (rc2 != 0)
		printk(KERN_WARNING
		       "XENBUS: Error freeing xenstore event channel: %d\n",
		       rc2);
fail0:
	xen_store_evtchn = -1;
	return rc;
}
static unsigned long __init xen_get_boot_time(void)
{
	ulong t;

	if (is_initial_xendomain()) {
		t = host_md_get_boot_time();

		HYPERVISOR_shared_info->wc_sec = t;
		HYPERVISOR_shared_info->arch.boot_timebase = mftb();
		DBG("%s: time: %ld\n", __func__, t);
	} else {
		t = time_from_shared();
		DBG("%s: %ld\n", __func__, t);
	}
	return t;
}
static int __init xen_console_init(void)
{
	if (!is_running_on_xen())
		goto out;

	if (is_initial_xendomain()) {
		if (xc_mode == XC_DEFAULT)
			xc_mode = XC_SERIAL;
		kcons_info.write = kcons_write_dom0;
	} else {
		if (!xen_start_info->console.domU.evtchn)
			goto out;
		if (xc_mode == XC_DEFAULT)
			xc_mode = XC_XVC;
		kcons_info.write = kcons_write;
	}

	switch (xc_mode) {
	case XC_XVC:
		strcpy(kcons_info.name, "xvc");
		if (xc_num == -1)
			xc_num = 0;
		break;

	case XC_SERIAL:
		strcpy(kcons_info.name, "ttyS");
		if (xc_num == -1)
			xc_num = 0;
		break;

	case XC_TTY:
		strcpy(kcons_info.name, "tty");
		if (xc_num == -1)
			xc_num = 1;
		break;

	default:
		goto out;
	}

	wbuf = alloc_bootmem(wbuf_size);

	register_console(&kcons_info);

 out:
	return 0;
}
static int xen_set_rtc_time(struct rtc_time *tm)
{
	ulong sec;

	if (is_initial_xendomain()) {
		host_md_set_rtc_time(tm);
		return 0;
	}

	sec = mktime(tm->tm_year, tm->tm_mon, tm->tm_mday,
		     tm->tm_hour, tm->tm_min, tm->tm_sec);

	HYPERVISOR_shared_info->wc_sec = sec;
	HYPERVISOR_shared_info->arch.boot_timebase = mftb();

	return 0;
}
void __init copy_edd(void)
{
	int ret;
	struct xen_platform_op op;

	if (!is_initial_xendomain())
		return;

	op.cmd = XENPF_firmware_info;

	op.u.firmware_info.type = XEN_FW_DISK_INFO;
	for (op.u.firmware_info.index = 0;
	     edd.edd_info_nr < EDDMAXNR;
	     op.u.firmware_info.index++) {
		struct edd_info *info = edd.edd_info + edd.edd_info_nr;

		info->params.length = sizeof(info->params);
		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
				     &info->params);
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
		C(device);
		C(version);
		C(interface_support);
		C(legacy_max_cylinder);
		C(legacy_max_head);
		C(legacy_sectors_per_track);
#undef C

		edd.edd_info_nr++;
	}

	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
	for (op.u.firmware_info.index = 0;
	     edd.mbr_signature_nr < EDD_MBR_SIG_MAX;
	     op.u.firmware_info.index++) {
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;
		edd.mbr_signature[edd.mbr_signature_nr++] =
			op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
	}
}
void __init copy_edid(void)
{
#if defined(CONFIG_FIRMWARE_EDID) && defined(CONFIG_X86)
	struct xen_platform_op op;

	if (!is_initial_xendomain())
		return;

	op.cmd = XENPF_firmware_info;
	op.u.firmware_info.index = 0;
	op.u.firmware_info.type = XEN_FW_VBEDDC_INFO;
	set_xen_guest_handle(op.u.firmware_info.u.vbeddc_info.edid,
			     edid_info.dummy);
	if (HYPERVISOR_platform_op(&op) != 0)
		memset(edid_info.dummy, 0x13, sizeof(edid_info.dummy));
#endif
}
static int __init mtrr_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (!is_initial_xendomain())
		return -ENODEV;

	if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
	    (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
	    (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
		return -ENODEV;

	set_num_var_ranges();
	init_table();

	return 0;
}
static int __init hook_pci_bus(void)
{
	if (!is_running_on_xen() || !is_initial_xendomain())
		return 0;

	pci_bus_probe = pci_bus_type.probe;
	pci_bus_type.probe = pci_bus_probe_wrapper;
	pci_bus_remove = pci_bus_type.remove;
	pci_bus_type.remove = pci_bus_remove_wrapper;

#ifndef __ia64__
	/* Make sure ACS will be enabled */
	pci_request_acs();
#endif

	return 0;
}
static int __init dcdrbu_init(void)
{
	int rc;

#ifdef CONFIG_XEN
	if (!is_initial_xendomain())
		return -ENODEV;
#endif

	spin_lock_init(&rbu_data.lock);

	init_packet_head();
	rbu_device = platform_device_register_simple("dell_rbu", -1, NULL, 0);
	if (IS_ERR(rbu_device)) {
		printk(KERN_ERR
		       "dell_rbu:%s:platform_device_register_simple "
		       "failed\n", __func__);
		return PTR_ERR(rbu_device);
	}

	rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
	if (rc)
		goto out_devreg;
	rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
	if (rc)
		goto out_data;
	rc = sysfs_create_bin_file(&rbu_device->dev.kobj,
				   &rbu_packet_size_attr);
	if (rc)
		goto out_imtype;

	rbu_data.entry_created = 0;
	return 0;

out_imtype:
	sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
out_data:
	sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
out_devreg:
	platform_device_unregister(rbu_device);
	return rc;
}
/*** Forcibly flush console data before dying. ***/
void xencons_force_flush(void)
{
	int sz;

	/* Emergency console is synchronous, so there's nothing to flush. */
	if (!is_running_on_xen() ||
	    is_initial_xendomain() ||
	    !xen_start_info->console.domU.evtchn)
		return;

	/* Spin until console data is flushed through to the daemon. */
	while (wc != wp) {
		int sent = 0;
		if ((sz = wp - wc) == 0)
			continue;
		sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
		if (sent > 0)
			wc += sent;
	}
}
void __init xen_init_IRQ(void)
{
	int i;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* No IRQ -> event-channel mappings. */
	for (i = 0; i < NR_IRQS; i++)
		irq_info[i] = IRQ_UNBOUND;

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < NR_DYNIRQS; i++) {
		irq_bindcount[dynirq_to_irq(i)] = 0;

		irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
		irq_desc[dynirq_to_irq(i)].action = NULL;
		irq_desc[dynirq_to_irq(i)].depth = 1;
		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
	}

	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
	for (i = 0; i < NR_PIRQS; i++) {
		irq_bindcount[pirq_to_irq(i)] = 1;

#ifdef RTC_IRQ
		/* If not domain 0, force our RTC driver to fail its probe. */
		if ((i == RTC_IRQ) && !is_initial_xendomain())
			continue;
#endif

		irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
		irq_desc[pirq_to_irq(i)].action = NULL;
		irq_desc[pirq_to_irq(i)].depth = 1;
		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
	}
}
static ulong find_grant_maps(void)
{
	struct device_node *xen;
	u64 *gm;
	u64 _gm[2];
	u64 expect;

	/* This value is currently hardcoded into the SLB logic that
	 * is written in assembler; see
	 * slb_miss_kernel_load_xen_linear for more information.
	 * Anything else and we cannot run. */
	expect = 34 - PAGE_SHIFT;

	xen = of_find_node_by_path("/xen");

	/*
	 * The foreign-map is 2x2 cells.
	 * The first entry is log2 of the base page frame.
	 * The second is the number of pages.
	 */
	gm = (u64 *)get_property(xen, "foreign-map", NULL);
	if (gm == NULL) {
		if (!is_initial_xendomain()) {
			printk("OF: /xen/foreign-map not present\n");
			_gm[0] = expect;
			_gm[1] = 2048;
			gm = _gm;
		} else
			panic("OF: /xen/foreign-map must be present\n");
	}

	if (gm[0] != expect)
		panic("foreign-map is 0x%lx, expect 0x%lx\n",
		      gm[0], expect);

	foreign_map_pfn = 1UL << gm[0];
	return gm[1];
}
static int __init loopback_init(void)
{
	int i, err = 0;
	struct net_device *tmp;

	if (nloopbacks == -1) {
		nloopbacks = 0;
		if (is_initial_xendomain()) {
			for (i = 1; (tmp = dev_get_by_index(i)); i++) {
				nloopbacks++;
				dev_put(tmp);
			}
		}
	}

	nloopbacks = max(nloopbacks, 4);

	for (i = 0; i < nloopbacks; i++)
		if ((err = make_loopback(i)) != 0)
			break;

	return err;
}
static void xenbus_dev_shutdown(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	unsigned long timeout = 5*HZ;

	DPRINTK("%s", dev->nodename);

	if (is_initial_xendomain())
		return;

	get_device(&dev->dev);
	if (dev->state != XenbusStateConnected) {
		dev_info(&dev->dev, "%s: %s: %s != Connected, skipping\n",
			 __FUNCTION__, dev->nodename,
			 xenbus_strstate(dev->state));
		goto out;
	}
	xenbus_switch_state(dev, XenbusStateClosing);
	timeout = wait_for_completion_timeout(&dev->down, timeout);
	if (!timeout)
		dev_info(&dev->dev, "%s: %s timeout closing device\n",
			 __FUNCTION__, dev->nodename);
 out:
	put_device(&dev->dev);
}
static int __init xencons_init(void)
{
	int rc;

	if (!is_running_on_xen())
		return -ENODEV;

	if (xc_mode == XC_OFF)
		return 0;

	if (!is_initial_xendomain()) {
		rc = xencons_ring_init();
		if (rc)
			return rc;
	}

	xencons_driver = alloc_tty_driver((xc_mode == XC_TTY) ?
					  MAX_NR_CONSOLES : 1);
	if (xencons_driver == NULL)
		return -ENOMEM;

	DRV(xencons_driver)->name = "xencons";
	DRV(xencons_driver)->major = TTY_MAJOR;
	DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL;
	DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL;
	DRV(xencons_driver)->init_termios = tty_std_termios;
	DRV(xencons_driver)->flags = TTY_DRIVER_REAL_RAW |
				     TTY_DRIVER_RESET_TERMIOS |
				     TTY_DRIVER_NO_DEVFS;
	DRV(xencons_driver)->termios = xencons_termios;
	DRV(xencons_driver)->termios_locked = xencons_termios_locked;

	switch (xc_mode) {
	case XC_XVC:
		DRV(xencons_driver)->name = "xvc";
		DRV(xencons_driver)->major = XEN_XVC_MAJOR;
		DRV(xencons_driver)->minor_start = XEN_XVC_MINOR;
		DRV(xencons_driver)->name_base = xc_num;
		break;
	case XC_SERIAL:
		DRV(xencons_driver)->name = "ttyS";
		DRV(xencons_driver)->minor_start = 64 + xc_num;
		DRV(xencons_driver)->name_base = xc_num;
		break;
	default:
		DRV(xencons_driver)->name = "tty";
		DRV(xencons_driver)->minor_start = 1;
		DRV(xencons_driver)->name_base = 1;
		break;
	}

	tty_set_operations(xencons_driver, &xencons_ops);

	if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
		printk("WARNING: Failed to register Xen virtual "
		       "console driver as '%s%d'\n",
		       DRV(xencons_driver)->name,
		       DRV(xencons_driver)->name_base);
		put_tty_driver(xencons_driver);
		xencons_driver = NULL;
		return rc;
	}

	tty_register_device(xencons_driver, 0, NULL);

	if (is_initial_xendomain()) {
		xencons_priv_irq = bind_virq_to_irqhandler(
			VIRQ_CONSOLE, 0, xencons_priv_interrupt, 0,
			"console", NULL);
		BUG_ON(xencons_priv_irq < 0);
	}

	printk("Xen virtual console successfully installed as %s%d\n",
	       DRV(xencons_driver)->name, xc_num);

	/* Check about framebuffer messing up the console */
	if (!is_initial_xendomain() &&
	    !xenbus_exists(XBT_NIL, "device", "vfb")) {
		/* FIXME: this is ugly */
		unregister_console(&kcons_info);
		kcons_info.flags |= CON_CONSDEV;
		register_console(&kcons_info);
	}

	return 0;
}
void __init xen_start_kernel(void)
{
	unsigned int i;
	struct xen_machphys_mapping mapping;
	unsigned long machine_to_phys_nr_ents;
#ifdef CONFIG_X86_32
	struct xen_platform_parameters pp;
	extern pte_t swapper_pg_fixmap[PTRS_PER_PTE];
	unsigned long addr;
#endif

	xen_setup_features();

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr_ents = mapping.max_mfn + 1;
	} else
		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
	while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents)
		machine_to_phys_order++;

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		phys_to_machine_mapping =
			(unsigned long *)xen_start_info->mfn_list;

	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_writable_pagetables));

	reserve_early(ALIGN(__pa_symbol(&_end), PAGE_SIZE),
		      __pa(xen_start_info->pt_base)
		      + (xen_start_info->nr_pt_frames << PAGE_SHIFT),
		      "Xen provided");

#ifdef CONFIG_X86_32
	WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_4gb_segments));

	init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
		hypervisor_virt_start = pp.virt_start;
		reserve_top_address(0UL - pp.virt_start);
	}

	BUG_ON(pte_index(hypervisor_virt_start));

	/* Do an early initialization of the fixmap area */
	make_lowmem_page_readonly(swapper_pg_fixmap,
				  XENFEAT_writable_page_tables);
	addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);
	set_pmd(pmd_offset(pud_offset(swapper_pg_dir + pgd_index(addr), addr),
			   addr),
		__pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE));
#else
	check_efer();
	xen_init_pt();
#endif

#define __FIXADDR_TOP (-PAGE_SIZE)
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define FIX_BUG_ON(fix) BUILD_BUG_ON(pmd_index(__fix_to_virt(FIX_##fix)) \
	!= pmd_index(__fix_to_virt(FIX_EARLYCON_MEM_BASE)))
	FIX_BUG_ON(SHARED_INFO);
	FIX_BUG_ON(ISAMAP_BEGIN);
	FIX_BUG_ON(ISAMAP_END);
#undef pmd_index
#undef __FIXADDR_TOP

	/* Switch to the real shared_info page, and clear the dummy page. */
	set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
	HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
	memset(empty_zero_page, 0, sizeof(empty_zero_page));

	setup_vcpu_info(0);

	/* Set up mapping of lowest 1MB of physical memory. */
	for (i = 0; i < NR_FIX_ISAMAPS; i++)
		if (is_initial_xendomain())
			set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
		else
			__set_fixmap(FIX_ISAMAP_BEGIN - i,
				     virt_to_machine(empty_zero_page),
				     PAGE_KERNEL_RO);
}
int __devinit gnttab_init(void)
{
	int i;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;

	if (!is_running_on_xen())
		return -ENODEV;

#if defined(CONFIG_XEN) && defined(CONFIG_PM_SLEEP)
	if (!is_initial_xendomain()) {
		int err = sysdev_class_register(&gnttab_sysclass);

		if (!err)
			err = sysdev_register(&device_gnttab);
		if (err)
			return err;
	}
#endif

	nr_grant_frames = 1;
	boot_max_nr_grant_frames = __max_nr_grant_frames();

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames);

	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
			      GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = nr_freelist_frames(nr_grant_frames);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL)
			goto ini_nomem;
	}

	if (gnttab_resume() < 0)
		return -ENODEV;

	nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head = NR_RESERVED_ENTRIES;

#if defined(CONFIG_XEN) && defined(__HAVE_ARCH_PTE_SPECIAL)
	if (!xen_feature(XENFEAT_auto_translated_physmap) &&
	    xen_feature(XENFEAT_gnttab_map_avail_bits)) {
#ifdef CONFIG_X86
		GNTMAP_pte_special = (__pte_val(pte_mkspecial(__pte_ma(0)))
				      >> _PAGE_BIT_UNUSED1)
				     << _GNTMAP_guest_avail0;
#else
#error Architecture not yet supported.
#endif
	}