/*
 * Queue a hypercall in the lguest_data async ring instead of issuing it
 * immediately.  A status byte of 0xFF marks a free slot; writing 0 hands
 * the slot to the Host.  If the ring is full we fall back to a synchronous
 * hcall(), which also flushes the pending entries.
 */
static void async_hcall(unsigned long call, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4)
{
	/* Note: This code assumes we're uniprocessor. */
	static unsigned int next_call;
	unsigned long flags;

	/*
	 * Disable interrupts if not already disabled: we don't want an
	 * interrupt handler making a hypercall while we're already doing
	 * one!
	 */
	local_irq_save(flags);
	if (lguest_data.hcall_status[next_call] != 0xFF) {
		/* Table full, so do normal hcall which will flush table. */
		hcall(call, arg1, arg2, arg3, arg4);
	} else {
		lguest_data.hcalls[next_call].arg0 = call;
		lguest_data.hcalls[next_call].arg1 = arg1;
		lguest_data.hcalls[next_call].arg2 = arg2;
		lguest_data.hcalls[next_call].arg3 = arg3;
		lguest_data.hcalls[next_call].arg4 = arg4;
		/* Arguments must all be written before we mark it to go */
		wmb();
		lguest_data.hcall_status[next_call] = 0;
		if (++next_call == LHCALL_RING_SIZE)
			next_call = 0;
	}
	local_irq_restore(flags);
}
/*
 * One-argument hypercall: defer it into the async ring while we are in a
 * lazy-mode batch, otherwise issue it immediately.
 */
static void lazy_hcall1(unsigned long call, unsigned long arg1)
{
	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_NONE)
		async_hcall(call, arg1, 0, 0, 0);
	else
		hcall(call, arg1, 0, 0, 0);
}
/*
 * Load a whole GDT by handing the Host each 8-byte descriptor in turn;
 * desc->size is the table size in bytes minus one, hence (size+1)/8 entries.
 */
static void lguest_load_gdt(const struct desc_ptr *desc)
{
	struct desc_struct *entries = (void *)desc->address;
	unsigned int idx;

	for (idx = 0; idx < (desc->size + 1) / 8; idx++)
		hcall(LHCALL_LOAD_GDT_ENTRY, idx,
		      entries[idx].a, entries[idx].b, 0);
}
/*
 * Change one GDT entry: update our shadow copy first, then tell the Host
 * about the new descriptor so it can update the real table.
 */
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
				   const void *desc, int type)
{
	/* Keep the local copy up to date. */
	native_write_gdt_entry(dt, entrynum, desc, type);

	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
	      dt[entrynum].a, dt[entrynum].b, 0);
}
/*
 * When the virtio_ring code wants to prod the Host, it calls us here and we
 * make a hypercall.  We hand the page number of the virtqueue so the Host
 * knows which virtqueue we're talking about.
 */
static void lg_notify(struct virtqueue *vq)
{
	/*
	 * We store our virtqueue information in the "priv" pointer of the
	 * virtqueue structure.
	 */
	struct lguest_vq_info *lvq = vq->priv;

	/*
	 * Fix: hcall() takes five arguments everywhere else in this file
	 * (see async_hcall()/lazy_hcall4()); this call site passed only four.
	 */
	hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0);
}
/*
 * To notify on status updates, we (ab)use the NOTIFY hypercall, with the
 * descriptor address of the device.  A zero status means "reset".
 */
static void set_status(struct virtio_device *vdev, u8 status)
{
	unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices;

	/* We set the status. */
	to_lgdev(vdev)->desc->status = status;
	/*
	 * Fix: hcall() takes five arguments (see async_hcall()); the fifth
	 * was missing here.
	 */
	hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
}
/*
 * Four-argument hypercall: defer it into the async ring while we are in a
 * lazy-mode batch, otherwise issue it immediately.
 */
static void lazy_hcall4(unsigned long call, unsigned long arg1,
			unsigned long arg2, unsigned long arg3,
			unsigned long arg4)
{
	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_NONE)
		async_hcall(call, arg1, arg2, arg3, arg4);
	else
		hcall(call, arg1, arg2, arg3, arg4);
}
/*G:034
 * The Interrupt Descriptor Table (IDT).
 *
 * The IDT tells the processor what to do when an interrupt comes in.  Each
 * entry in the table is a 64-bit descriptor: this holds the privilege level,
 * address of the handler, and...  well, who cares?  The Guest just asks the
 * Host to make the change anyway, because the Host controls the real IDT.
 */
static void lguest_write_idt_entry(gate_desc *dt, int entrynum,
				   const gate_desc *g)
{
	/*
	 * The gate_desc structure is 8 bytes long: we hand it to the Host in
	 * two 32-bit chunks.  The whole 32-bit kernel used to hand descriptors
	 * around like this; typesafety wasn't a big concern in Linux's early
	 * years.
	 */
	u32 *desc = (u32 *)g;

	/* Keep the local copy up to date. */
	native_write_idt_entry(dt, entrynum, g);

	/*
	 * Tell Host about this new entry.  Fix: hcall() takes five arguments
	 * (see async_hcall()); the fifth was missing here.
	 */
	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
}
/*
 * Acquire a vterm channel from the hypervisor and transfer its interrupt
 * source to partition @lpid.  If @chan is negative the acquired liobn is
 * used as the channel number.  Returns the liobn of the new vterm.
 */
static uval32 add_vterm(sval chan, uval lpid)
{
	int ret;

	/* Acquire a vterm device. */
	hargs.opcode = H_VIO_CTL;
	hargs.args[0] = HVIO_ACQUIRE;
	hargs.args[1] = HVIO_VTERM;
	hargs.args[2] = 0;

	ret = hcall(&hargs);
	ASSERT(ret >= 0 && hargs.retval == 0, "hcall failure: %d "
	       UVAL_CHOOSE("0x%x", "0x%lx") "\n", ret, hargs.retval);

	uval32 liobn = hargs.args[0];

	printf("vterm %lx liobn: %x\n", chan, liobn);

	/* Hand the vterm's interrupt source over to the partition. */
	hargs.opcode = H_RESOURCE_TRANSFER;
	hargs.args[0] = INTR_SRC;
	hargs.args[1] = liobn;
	hargs.args[2] = 0;
	hargs.args[3] = 0;
	hargs.args[4] = lpid;

	/*
	 * Fix: the return value was discarded here, so the ASSERT below was
	 * re-checking the status of the *previous* hcall.
	 */
	ret = hcall(&hargs);
	ASSERT(ret >= 0 && hargs.retval == 0, "hcall failure: %d "
	       UVAL_CHOOSE("0x%x", "0x%lx") "\n", ret, hargs.retval);

	/* FIXME on failure HVIO_RELEASE */

	if (chan < 0) {
		chan = liobn;
	}
	add_vty(chan);
	return liobn;
}
/*
 * Early console output: copy up to 16 bytes into a NUL-terminated scratch
 * buffer and hand its physical address to the Host via NOTIFY.
 * Returns the number of bytes actually written.
 */
static __init int early_put_chars(u32 vtermno, const char *buf, int count)
{
	char scratch[17];
	unsigned int len = count;

	/* Clamp to what fits, leaving room for the terminator. */
	if (len >= sizeof(scratch))
		len = sizeof(scratch) - 1;

	/* The Host expects a NUL-terminated string, so we make a copy. */
	memcpy(scratch, buf, len);
	scratch[len] = '\0';

	hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0);

	return len;
}
/*
 * Program the Host's clock to interrupt us @delta ns in the future.
 * Deltas below LG_CLOCK_MIN_DELTA are rejected with -ETIME.
 */
static int lguest_clockevent_set_next_event(unsigned long delta,
					    struct clock_event_device *evt)
{
	if (delta >= LG_CLOCK_MIN_DELTA) {
		/* Please wake us this far in the future. */
		hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
		return 0;
	}

	/*
	 * FIXME: I don't think this can ever happen, but James tells me he
	 * had to put this code in.  Maybe we should remove it now.  Anyone?
	 */
	if (printk_ratelimit())
		printk(KERN_DEBUG "%s: small delta %lu ns\n",
		       __func__, delta);
	return -ETIME;
}
/*
 * Mode changes for the clock event device.  Only one-shot mode is supported;
 * shutdown is signalled to the Host with a zero-delta SET_CLOCKEVENT.
 */
static void lguest_clockevent_set_mode(enum clock_event_mode mode,
				       struct clock_event_device *evt)
{
	if (mode == CLOCK_EVT_MODE_UNUSED || mode == CLOCK_EVT_MODE_SHUTDOWN) {
		/* A 0 argument shuts the clock down. */
		hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
	} else if (mode == CLOCK_EVT_MODE_PERIODIC) {
		/* Periodic mode is not supported: one-shot only. */
		BUG();
	}
	/* ONESHOT is what we expect; RESUME needs no action. */
}
/*
 * Create a v-scsi virtual device for partition @lpid: transfer the interrupt
 * source for @liobn to the partition, then publish the device node and its
 * DMA window / interrupt properties in the Open Firmware tree.
 * Always returns 0; hcall failure is fatal via ASSERT.
 */
static int add_vscsi(uval lpid, uval liobn, uval dma_sz)
{
	static const char fmt[] = "/vdevice/v-scsi@%x";
	/* Room for fmt plus up to 8 hex digits replacing "%x". */
	char vscsi_node[sizeof (fmt) + 8 - 1];
	uval32 intr[2] = { /* source */ 0x0,
			   /* +edge */ 0x0 };
	uval32 dma[] = { /* client */
			 /* liobn */ 0x0,
			 /* phys */ 0x0, 0x0,
			 /* size */ 0x0, 0x0 };
	uval32 val;
	int ret;

	/* Hand the device's interrupt source over to the partition. */
	hargs.opcode = H_RESOURCE_TRANSFER;
	hargs.args[0] = INTR_SRC;
	hargs.args[1] = liobn;
	hargs.args[2] = 0;
	hargs.args[3] = 0;
	hargs.args[4] = lpid;

	ret = hcall(&hargs);
	ASSERT(ret >= 0 && hargs.retval == 0, "hcall vscsi transfer failure: %d "
	       UVAL_CHOOSE("0x%x", "0x%lx") "\n", ret, hargs.retval);

	/* Fill in the DMA window and interrupt source with the liobn. */
	dma[0] = liobn;
	dma[4] = dma_sz;
	intr[0] = liobn;
	val = liobn;

	/* Build the node path, e.g. "/vdevice/v-scsi@1234". */
	snprintf(vscsi_node, sizeof (vscsi_node), fmt, val);

	of_make_node(vscsi_node);
	of_set_prop(vscsi_node, "name", "v-scsi", -1);
	of_set_prop(vscsi_node, "device_type", "vscsi", -1);
	of_set_prop(vscsi_node, "compatible", "IBM,v-scsi", -1);
	of_set_prop(vscsi_node, "reg", &val, sizeof (val));
	of_set_prop(vscsi_node, "ibm,my-dma-window", dma, sizeof (dma));
	of_set_prop(vscsi_node, "interrupts", &intr, sizeof (intr));

	return 0;
}
/*
 * Standalone tool: hold a page of logical memory, ask the hypervisor to
 * fill it with partition info (H_LPAR_INFO), and print selected fields.
 * Optional argv[1] supplies the logical address to use.
 */
int main(int argc, char **argv)
{
	int hfd = hcall_init();

	hold_args.size = 4096;
	if (argc > 1) {
		hold_args.laddr = strtoull(argv[1], NULL, 0);
	}

	if (hold_args.laddr == 0) {
		/* NOTE(review): ioctl return value is unchecked -- failure
		 * would leave laddr at 0; confirm intended. */
		ioctl(hfd, OH_MEM_HOLD, &hold_args);
		printf("Holding " UVAL_CHOOSE("%x %x\n", "%lx %lx\n"),
		       hold_args.laddr, hold_args.size);
	}

	/* NOTE(review): mmap result is not checked against MAP_FAILED. */
	char *ptr = mmap(NULL, hold_args.size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, hfd, hold_args.laddr);

	memset(ptr, 0xff, 4096);

	/* Ask the hypervisor to write partition info into the held page. */
	hargs.opcode = H_LPAR_INFO;
	hargs.args[0] = 0x1000;
	hargs.args[1] = hold_args.laddr;

	hcall(&hargs);

	oh_partition_info_t *pinfo = (typeof(pinfo)) ptr;

	/* NOTE(review): the 32-bit UVAL_CHOOSE variant "%x" lacks the
	 * trailing newline that the 64-bit "%llx\n" variant has. */
	printf("pinfo.htab_size: " UVAL_CHOOSE("%x", "%llx\n"),
	       pinfo->htab_size);
	printf("pinfo.chunk_size: " UVAL_CHOOSE("%x", "%llx\n"),
	       pinfo->chunk_size);
	printf("pinfo.large_page_size1: " UVAL_CHOOSE("%x", "%llx\n"),
	       pinfo->large_page_size1);
	printf("pinfo.large_page_size2: " UVAL_CHOOSE("%x", "%llx\n"),
	       pinfo->large_page_size2);

	return 0;
}
/*
 * Handle (synchronous) user traps.  These are all reflected
 * to the current partition, except for the hypervisor reserved
 * interrupts when they are issued by the partition OS.  When
 * a hypervisor interrupt is generated by a partition application
 * (ring 3) it is always reflected to the guest OS.
 */
void trap(struct cpu_thread *thread, uval32 trapno)
{
	/* Privilege level of the interrupted code segment. */
	uval cpl = RPL(thread->tss.srs.regs.cs);

	if (likely(trapno >= BASE_HCALL_VECTOR && cpl < 3)) {
		switch (trapno) {
		case HCALL_VECTOR:
			/* hcall */
			hcall(thread);
			break;
		case HCALL_VECTOR_IRET:
			/* iret */
			iret(thread);
			break;
		default:
			/*
			 * Fix: the format was "0x%ulx", which prints the
			 * value via %u and then the literal text "lx";
			 * "%x" prints the vector in hex as intended.
			 */
			hprintf("%s: int 0x%x in reserved range - ignored\n",
				__func__, trapno);
			break;
		}
		return;
	}
	/* Anything else (or ring-3 hypervisor vectors) goes to the guest. */
	raise_exception(thread, trapno);
}
/*
 * Transfer the memory chunk [base, base+size) to partition @lpid and
 * record the resulting logical address in the Open Firmware tree.
 * Always returns 0; hcall failure is fatal via ASSERT.
 */
static int add_memory(uval lpid, uval base, uval size)
{
	uval64 laddr;
	int rc;

	hargs.opcode = H_RESOURCE_TRANSFER;
	hargs.args[0] = MEM_ADDR;
	hargs.args[1] = 0;
	hargs.args[2] = base;
	hargs.args[3] = size;
	hargs.args[4] = lpid;

	rc = hcall(&hargs);
	ASSERT(rc >= 0 && hargs.retval == 0, "hcall failure: %d "
	       UVAL_CHOOSE("0x%x", "0x%lx") "\n", rc, hargs.retval);

	/* The hypervisor tells us where the chunk landed. */
	laddr = hargs.args[0];
	msg("add memory: %lx %lx -> %llx\n", base, size, laddr);
	of_add_memory(laddr, size);

	return 0;
}
/*
 * Size the partition's hashed page table: ask the hypervisor for an HTAB
 * of log2(total-memory)/64 and advertise it via the cpu node's
 * "ibm,pft-size" property.  Always returns 0; hcall failure is fatal.
 */
static int add_htab(uval lpid, uval size)
{
	static uval32 ibm_pft_size[] = { 0x0, 0x0 };
	uval s = 1;
	uval lsize = 0;
	char cpu_node[64];
	uval cpu;
	int ret;

	while (s < size) {
		s <<= 1;
		++lsize;
	}
	/* s is now the smallest power of two >= size; lsize = log2(s). */
	/* HTAB is 1/64th of memory, i.e. log2(s / 64).
	 * NOTE(review): if size <= 64 this underflows lsize -- confirm
	 * callers always pass larger totals. */
	lsize -= 6;

	hargs.opcode = H_HTAB;
	hargs.args[0] = lpid;
	hargs.args[1] = lsize;

	ret = hcall(&hargs);
	ASSERT(ret >= 0, "hcall(HTAB, 0x%lx\n", lsize);

	ibm_pft_size[1] = lsize;

	/* Only cpu 0's node is updated here. */
	cpu = 0;
	snprintf(cpu_node, sizeof (cpu_node), "cpus/cpu@%ld", cpu);
	of_set_prop(cpu_node, "ibm,pft-size", ibm_pft_size,
		    sizeof (ibm_pft_size));

	printf("ibm,pft-size: 0x%x, 0x%x\n",
	       ibm_pft_size[0], ibm_pft_size[1]);

	return 0;
}
/*
 * To notify on reset or feature finalization, we (ab)use the NOTIFY
 * hypercall, with the descriptor address of the device.
 */
static void status_notify(struct virtio_device *vdev)
{
	unsigned long off;

	/* Offset of this device's descriptor within lguest_devices. */
	off = (void *)to_lgdev(vdev)->desc - lguest_devices;

	hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + off, 0, 0, 0);
}
/*
 * Acquire a logical-LAN device for partition @lpid, transfer its interrupt
 * source, and publish the l-lan node (MAC, DMA window, interrupts) in the
 * Open Firmware tree.  Always returns 0; hcall failure is fatal via ASSERT.
 */
static int add_llan(uval lpid)
{
	int ret;

	hargs.opcode = H_VIO_CTL;
	hargs.args[0] = HVIO_ACQUIRE;
	hargs.args[1] = HVIO_LLAN;
	hargs.args[2] = ((8 << 12) >> 3) << 12;

	ret = hcall(&hargs);
	ASSERT(ret >= 0 && hargs.retval == 0, "hcall failure: %d "
	       UVAL_CHOOSE("0x%x", "0x%lx") "\n", ret, hargs.retval);

	uval32 liobn = hargs.args[0];
	uval64 dma_sz = hargs.args[2];

	/* Hand the device's interrupt source over to the partition. */
	hargs.opcode = H_RESOURCE_TRANSFER;
	hargs.args[0] = INTR_SRC;
	hargs.args[1] = liobn;
	hargs.args[2] = 0;
	hargs.args[3] = 0;
	hargs.args[4] = lpid;

	ret = hcall(&hargs);
	ASSERT(ret >= 0 && hargs.retval == 0, "hcall failure: %d "
	       UVAL_CHOOSE("0x%x", "0x%lx") "\n", ret, hargs.retval);

	/* FIXME on failure HVIO_RELEASE */

	uval32 dma[] = { liobn, 0, 0, dma_sz >> 32, dma_sz & 0xffffffff };

	/* Locally-administered MAC: 02:00 followed by the liobn bytes. */
	uval8 mac[ETH_ALEN] = { 0x02, 0x00, };
	/*
	 * Fix: "*(uval32 *)&mac[2] = liobn" was a misaligned store through an
	 * incompatible pointer type (undefined behaviour); memcpy writes the
	 * same bytes portably.
	 */
	memcpy(&mac[2], &liobn, sizeof (liobn));

	char llan_node[64];

	snprintf(llan_node, sizeof(llan_node), "/vdevice/l-lan@%x", liobn);

	of_make_node(llan_node);
	of_set_prop(llan_node, "name", "l-lan", -1);
	of_set_prop(llan_node, "compatible", "IBM,l-lan", -1);
	of_set_prop(llan_node, "device_type", "network", -1);
	of_set_prop(llan_node, "reg", &liobn, sizeof (liobn));

	uval32 val = 2;

	of_set_prop(llan_node, "ibm,#dma-address-cells", &val, sizeof (val));
	of_set_prop(llan_node, "ibm,#dma-size-cells", &val, sizeof (val));
	val = 255;
	of_set_prop(llan_node, "ibm,mac-address-filters", &val, sizeof (val));
	val = 0;
	of_set_prop(llan_node, "ibm,vserver", &val, sizeof (val));
	of_set_prop(llan_node, "local-mac-address", mac, sizeof (mac));
	of_set_prop(llan_node, "mac-address", mac, sizeof (mac));
	of_set_prop(llan_node, "ibm,my-dma-window", dma, sizeof (dma));
	of_set_prop(llan_node, "interrupts", &liobn, sizeof (liobn));

	return 0;
}
/* Ask the Host to reboot us, handing it the physical address of the reason. */
static void lguest_restart(char *reason)
{
	hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
}
/* Panic notifier: hand the panic message to the Host and power off. */
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
{
	hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
	/* The hypercall won't return, but gcc wants a value here. */
	return NOTIFY_DONE;
}
/* Halt the Guest until the next interrupt arrives. */
static void lguest_safe_halt(void)
{
	hcall(LHCALL_HALT, 0, 0, 0, 0);
}
/* Ask the Host to power us off, with a human-readable reason string. */
static void lguest_power_off(void)
{
	hcall(LHCALL_SHUTDOWN, __pa("Power down"),
	      LGUEST_SHUTDOWN_POWEROFF, 0, 0);
}
/*
 * Standalone tool: create a hypervisor message queue in a held page, route
 * its interrupt to SIGRTMIN, then loop forever printing each message as it
 * arrives.
 */
int main(int argc, char **argv)
{
	(void)argc;
	(void)argv;

	int fd = hcall_init();
	uval laddr = mem_hold(4096);

	/* NOTE(review): mmap result is not checked against MAP_FAILED. */
	char *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, laddr);

	/* Create the message queue backed by the held page. */
	args.opcode = H_CREATE_MSGQ;
	args.args[0] = laddr;
	args.args[1] = 4096;
	args.args[2] = 0;

	hcall(&args);

	/* The hypervisor hands back the interrupt (xirr) for the queue. */
	uval xirr = args.args[0];

	printf(UVAL_CHOOSE("hcall: %x (%lx) %x %x\n",
			   "hcall: %lx (%lx) %lx %lx\n"),
	       args.retval, xirr, args.args[1], args.args[2]);

	/* Deliver the queue's interrupt as SIGRTMIN. */
	sigfillset(&msg_sig.sa_mask);
	sigaction(SIGRTMIN, &msg_sig, NULL);

	ora.oh_interrupt = xirr;
	ora.oh_signal = SIGRTMIN;
	ioctl(fd, OH_IRQ_REFLECT, &ora);

	struct msg_queue *mq = (struct msg_queue *)ptr;

	printf(UVAL_CHOOSE("msgq: %lx %lx %lx\n", "msgq: %lx %lx %lx\n"),
	       mq->bufSize, mq->head, mq->tail);

	uval lpid;

	args.opcode = H_GET_LPID;
	hcall(&args);
	lpid = args.args[0];

	printf(UVAL_CHOOSE("self lpid: %lx\n", "self lpid: %lx\n"), lpid);

	/* Service loop: wait for the signal, then drain one message. */
	while (1) {
		sigset_t set;

		sigemptyset(&set);
		sigaddset(&set, SIGRTMIN);

		siginfo_t info;
		int ret = sigwaitinfo(&set, &info);

		printf("sigwaitinfo: %d\n", ret);
		printf(UVAL_CHOOSE("msgq: %lx %lx %lx\n",
				   "msgq: %lx %lx %lx\n"),
		       mq->bufSize, mq->head, mq->tail);

		int tail = mq->tail % mq->bufSize;
		struct async_msg_s *msg = &mq->buffer[tail];

		printf(UVAL_CHOOSE("msg from: %lx\n", "msg from: %lx\n"),
		       msg->am_source);
		printf(UVAL_CHOOSE("msg raw data: %lx %lx %lx %lx\n",
				   "msg raw data: %lx %lx %lx %lx\n"),
		       msg->am_data.amu_data[0], msg->am_data.amu_data[1],
		       msg->am_data.amu_data[2], msg->am_data.amu_data[3]);

		/* Consume the message. */
		++mq->tail;
	}

	/* NOTE(review): unreachable -- the loop above never exits. */
	close(fd);
	return 0;
}
int main(int argc, char **argv) { char scratch[128]; hcall_fd = hcall_init(); int ret = parse_args(argc, argv); if (ret < 0) return ret; uval total = 0; ASSERT(num_ranges > 0, "No memory ranges specified\n"); ret = get_file("state", scratch, sizeof(scratch)); if (ret <= 0 || strncmp(scratch, "READY", ret) != 0) { bailout("Partition not ready\n"); } uval rmo_start = lranges[0].lr_base; uval rmo_size = lranges[0].lr_size; uval laddr = 0; uval img_base = 0; uval img_offset = 0; int count = 0; while (count < 255) { char image[256]; char image_laddr[256]; char data[64]; struct stat sbuf; uval base; snprintf(image, sizeof(image), HYPE_ROOT "/%s/image%02x", oh_pname, count); snprintf(image_laddr, sizeof(image_laddr), "image%02x_load", count); ++count; ret = stat(image, &sbuf); if (ret < 0) continue; ret = get_file(image_laddr, data, sizeof(data)); if (ret >= 0) { uval64 l = strtoull(data, NULL, 0); ASSERT(errno != ERANGE, "Corrupted data %s\n", image_laddr); laddr = l; } int size = laddr_load(image, rmo_start + laddr, &base); if (0 == img_base) { img_base = base; img_offset = laddr; } laddr = ALIGN_UP(laddr + size, PGSIZE); } char pinfo_buf[64]; uval64 pinfo; ret = get_file("pinfo", pinfo_buf, sizeof(pinfo_buf)); if (ret <= 0 || (pinfo = strtoull(pinfo_buf, NULL, 0)) <= 0) { pinfo = (uval64)-1; } uval lpid; hargs.opcode = H_CREATE_PARTITION; hargs.args[0] = rmo_start; hargs.args[1] = rmo_size; hargs.args[2] = pinfo; ret = hcall(&hargs); ASSERT(ret >= 0 && hargs.retval == 0, "hcall failure: %d " UVAL_CHOOSE("0x%x", "0x%lx") "\n", ret, hargs.retval); lpid = hargs.args[0]; set_file_printf("of_tree/ibm,partition-no", "0x%llx", lpid); set_file_printf("lpid", "0x%llx", lpid); set_file("state", "CREATED", -1); clean_vdevices(); of_add_memory(0, rmo_size); total += rmo_size; int i = 1; while (i < num_ranges) { uval64 base = lranges[i].lr_base; uval64 size = lranges[i].lr_size; add_memory(lpid, base, size); total += size; ++i; } #ifdef __PPC__ add_htab(lpid, total); #endif if 
(get_file_numeric("res_console_srv", &console_ua) < 0) { console_ua = 0; } else { vtys = 1; } uval64 vty0 = 0; if (vtys == 0 && console_ua == 0) { add_vty(0); } else { if (vtys > 0) { /* add vty and force it to be vty@0 */ vty0 = add_vterm(0, lpid); --vtys; } while (vtys > 0) { /* add vty and let it be the proper liobn */ add_vterm(-1, lpid); } } if (crq > 0) { ret = add_vdev(lpid, crq); if (ret == -1) { fprintf(stderr, "vdev failed : 0x%lx\n", crq); return 1; } } if (console_ua) { printf("Registering console vterm: " UVAL_CHOOSE("0x%lx","0x%llx")" -> 0x%lx:0x%llx\n", console_ua, lpid, vty0); hargs.opcode = H_REGISTER_VTERM; hargs.args[0] = console_ua; hargs.args[1] = lpid; hargs.args[2] = vty0; ret = hcall(&hargs); ASSERT(ret >= 0 && hargs.retval == 0, "hcall failure: %d " UVAL_CHOOSE("0x%x", "0x%lx") "\n", ret, hargs.retval); } add_llan(lpid); #ifdef __PPC__ load_of(lranges[0].lr_base, lranges[0].lr_size); #endif #ifdef __i386__ if (0 != img_base) { static struct partition_info part_info[2]; fill_pinfo(&part_info[1], lpid, rmo_size); if (inject_pinfo(part_info, sizeof(part_info), img_base, img_offset)) { add_cmdlineargs(img_base, 0x1000, /* offset in image for data */ lpid); } } #endif hargs.opcode = H_SET_SCHED_PARAMS; hargs.args[0] = lpid; hargs.args[1] = 0; hargs.args[2] = 1; hargs.args[3] = 0; ret = hcall(&hargs); ASSERT(ret >= 0 && hargs.retval == 0, "hcall failure: %d " UVAL_CHOOSE("0x%x", "0x%lx") "\n", ret, hargs.retval); hargs.opcode = H_START; hargs.args[0] = lpid; char buf[64]; ret = get_file("pc", buf, sizeof(buf)); if (ret >= 0) { buf[ret] = 0; hargs.args[1] = strtoull(buf, NULL, 0); } else { hargs.args[1] = 0; } int x = 2; while (x < 8) { int y = 0; snprintf(buf, sizeof(buf), "r%d", x); y = get_file(buf, buf, sizeof(buf)); if (y >= 0) { buf[y] = 0; hargs.args[x] = strtoull(buf, NULL, 0); } else { hargs.args[x] = 0; } ++x; } if (wait_for_key) { printf("waiting for keypress:"); (void)fgetc(stdin); } printf("Starting...\n"); hcall(&hargs); ASSERT(ret 
>= 0 && hargs.retval == 0, "hcall failure: %d " UVAL_CHOOSE("0x%x", "0x%lx") "\n", ret, hargs.retval); set_file("state", "RUNNING", -1); return 0; }
/*
 * When lazy mode is turned off reset the per-cpu lazy mode variable and then
 * issue the do-nothing hypercall to flush any stored calls.
 */
static void lguest_leave_lazy_mode(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	/*
	 * Fix: hcall() takes five arguments (see async_hcall()); the fifth
	 * was missing here.
	 */
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
}
/* * The Global Descriptor Table. * * The Intel architecture defines another table, called the Global Descriptor * Table (GDT). You tell the CPU where it is (and its size) using the "lgdt" * instruction, and then several other instructions refer to entries in the * table. There are three entries which the Switcher needs, so the Host simply * controls the entire thing and the Guest asks it to make changes using the * LOAD_GDT hypercall. * * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY * hypercall and use that repeatedly to load a new IDT. I don't think it * really matters, but wouldn't it be nice if they were the same? Wouldn't * it be even better if you were the one to send the patch to fix it? */ static void lguest_load_gdt(const struct desc_ptr *desc) { BUG_ON((desc->size+1)/8 != GDT_ENTRIES); hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0); }
/*
 * For a single GDT entry which changes, we do the lazy thing: alter our GDT,
 * then tell the Host to reload the entire thing.  This operation is so rare
 * that this naive implementation is reasonable.
 */
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
				   const void *desc, int type)
{
	native_write_gdt_entry(dt, entrynum, desc, type);
	/*
	 * Fix: hcall() takes five arguments (see async_hcall()); the fifth
	 * was missing here.
	 */
	hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0, 0);
}
/*
 * Finish a context switch: flush any hypercalls batched during the switch,
 * then let paravirt bookkeeping know we are done.
 */
static void lguest_end_context_switch(struct task_struct *next)
{
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
	paravirt_end_context_switch(next);
}
/*
 * Leave lazy MMU mode: flush the batched hypercalls, then update the
 * paravirt lazy-mode state.
 */
static void lguest_leave_lazy_mmu_mode(void)
{
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
	paravirt_leave_lazy_mmu();
}