/*
 * The eip contains the *virtual* address of the Guest's instruction: we copy
 * the instruction here so the Launcher doesn't have to walk the page tables
 * to decode it.  We handle the case (eg. in a kernel module) where the
 * instruction is over two pages, and the pages are virtually but not
 * physically contiguous.
 *
 * The longest possible x86 instruction is 15 bytes, but we don't handle
 * anything that strange.
 */
static void copy_from_guest(struct lg_cpu *cpu,
			    void *dst, unsigned long vaddr, size_t len)
{
	size_t head = PAGE_SIZE - (vaddr % PAGE_SIZE);
	unsigned long paddr;

	BUG_ON(len > PAGE_SIZE);

	/* Does the requested range spill onto the following page? */
	if (len > head) {
		size_t tail = len - head;

		/*
		 * Copy the overflow separately, but only if the second page
		 * is actually mapped; an unmapped tail becomes zeroes.
		 */
		if (__guest_pa(cpu, vaddr + head, &paddr))
			copy_from_guest(cpu, dst + head, vaddr + head, tail);
		else
			memset(dst + head, 0, tail);

		/* The direct read below now covers only the first page. */
		len = head;
	}

	/*
	 * This will kill the guest if it isn't mapped, but that shouldn't
	 * happen.
	 */
	__lgread(cpu, dst, guest_pa(cpu, vaddr), len);
}
/* * This is the fast-track version for just changing the three TLS entries. * Remember that this happens on every context switch, so it's worth * optimizing. But wouldn't it be neater to have a single hypercall to cover * both cases? */ void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) { struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN]; __lgread(cpu, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES); fixup_gdt_table(cpu, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1); /* Note that just the TLS entries have changed. */ cpu->changed |= CHANGED_GDT_TLS; }
/*
 * Dump the Guest's physical memory, page by page, to a file on the host so
 * it can be inspected offline.
 *
 * Fixes over the previous version:
 *  - the file_open() result is checked before use (assumes file_open returns
 *    NULL on failure — TODO confirm its error convention, e.g. ERR_PTR);
 *  - the dump is created owner-only (0600) instead of 0644: the contents are
 *    exactly as sensitive as the Guest's RAM;
 *  - the page index is unsigned long so "i * PAGE_SIZE" cannot overflow a
 *    32-bit int for large pfn_limit values.
 *
 * NOTE(review): a fixed, predictable /tmp name is still a symlink-attack
 * risk — consider a configurable path or exclusive creation.
 */
static void write_guest_memory(struct lg_cpu *cpu)
{
	unsigned long i;
	struct file *memory;

	/* 0600: guest memory may hold secrets; don't make it world-readable. */
	memory = file_open("/tmp/lgmemory", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (!memory)
		return;	/* Can't open the dump file: nothing we can do. */

	/* Walk every guest page frame up to the configured limit. */
	for (i = 0; i < cpu->lg->pfn_limit; i++) {
		__lgread(cpu, page_buffer, i * PAGE_SIZE, PAGE_SIZE);
		file_write(memory, i * PAGE_SIZE, page_buffer, PAGE_SIZE);
	}

	file_close(memory);
}
/*
 * Does the instruction at the Guest's eip look like a hypercall (vmcall)?
 */
static bool is_hypercall(struct lg_cpu *cpu)
{
	u8 insn[3];

	/*
	 * This must be the Guest kernel trying to do something.  The bottom
	 * two bits of the CS segment register are the privilege level.
	 */
	if ((cpu->regs->cs & 3) != GUEST_PL)
		return false;

	/* Fetch the three opcode bytes at eip and match them one by one. */
	__lgread(cpu, insn, guest_pa(cpu, cpu->regs->eip), sizeof(insn));
	if (insn[0] != 0x0f)
		return false;
	if (insn[1] != 0x01)
		return false;

	/* 0f 01 c1 is the vmcall instruction. */
	return insn[2] == 0xc1;
}
/*H:120
 * This is the core hypercall routine: where the Guest gets what it wants.
 * Or gets killed.  Or, in the case of LHCALL_SHUTDOWN, both.
 */
static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
{
	switch (args->arg0) {
	case LHCALL_FLUSH_ASYNC:
		/*
		 * This call does nothing, except by breaking out of the Guest
		 * it makes us process all the asynchronous hypercalls.
		 */
		break;
	case LHCALL_LGUEST_INIT:
		/*
		 * You can't get here unless you're already initialized.  Don't
		 * do that.
		 */
		kill_guest(cpu, "already have lguest_data");
		break;
	case LHCALL_SHUTDOWN: {
		/*
		 * Shutdown is such a trivial hypercall that we do it in four
		 * lines right here.
		 */
		char msg[128];
		/*
		 * If the lgread fails, it will call kill_guest() itself; the
		 * kill_guest() with the message will be ignored.
		 */
		__lgread(cpu, msg, args->arg1, sizeof(msg));
		/* Force termination in case the Guest's string was too long. */
		msg[sizeof(msg)-1] = '\0';
		kill_guest(cpu, "CRASH: %s", msg);
		/* A restart request turns the "dead" marker into -ERESTART. */
		if (args->arg2 == LGUEST_SHUTDOWN_RESTART)
			cpu->lg->dead = ERR_PTR(-ERESTART);
		break;
	}
	case LHCALL_FLUSH_TLB:
		/*
		 * FLUSH_TLB comes in two flavors, depending on the argument:
		 * non-zero clears everything, zero only the userspace part.
		 */
		if (args->arg1)
			guest_pagetable_clear_all(cpu);
		else
			guest_pagetable_flush_user(cpu);
		break;
	/*
	 * All these calls simply pass the arguments through to the right
	 * routines.
	 */
	case LHCALL_NEW_PGTABLE:
		guest_new_pagetable(cpu, args->arg1);
		break;
	case LHCALL_SET_STACK:
		guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
		break;
	case LHCALL_SET_PTE:
		guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
		break;
	case LHCALL_SET_PMD:
		guest_set_pmd(cpu->lg, args->arg1, args->arg2);
		break;
	case LHCALL_SET_CLOCKEVENT:
		guest_set_clockevent(cpu, args->arg1);
		break;
	case LHCALL_TS:
		/* This sets the TS flag, as we saw used in run_guest(). */
		cpu->ts = args->arg1;
		break;
	case LHCALL_HALT:
		/* Similarly, this sets the halted flag for run_guest(). */
		cpu->halted = 1;
		break;
	case LHCALL_NOTIFY:
		cpu->pending_notify = args->arg1;
		break;
	default:
		/* It should be an architecture-specific hypercall. */
		if (lguest_arch_do_hcall(cpu, args))
			kill_guest(cpu, "Bad hypercall %li\n", args->arg0);
	}
}
/*
 * Core hypercall dispatcher, PAE-aware variant: adds LHCALL_SEND_INTERRUPTS
 * and the CONFIG_X86_PAE paths (64-bit PTEs and the extra SET_PMD level).
 * NOTE(review): this file also contains an earlier do_hcall definition with
 * the same name — only one of the two can live in one translation unit;
 * confirm which is intended.
 */
static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
{
	switch (args->arg0) {
	case LHCALL_FLUSH_ASYNC:
		/*
		 * Nothing to do: merely breaking out of the Guest lets us
		 * process the asynchronous hypercalls.
		 */
		break;
	case LHCALL_SEND_INTERRUPTS:
		/*
		 * Also a deliberate no-op here — presumably the pending
		 * interrupts are delivered on the way back into the Guest;
		 * TODO confirm against the caller.
		 */
		break;
	case LHCALL_LGUEST_INIT:
		/* Re-initializing an already-initialized Guest is a bug. */
		kill_guest(cpu, "already have lguest_data");
		break;
	case LHCALL_SHUTDOWN: {
		char msg[128];
		/*
		 * If this read faults, __lgread() calls kill_guest() itself
		 * and the "CRASH" message below is ignored.
		 */
		__lgread(cpu, msg, args->arg1, sizeof(msg));
		/* Guarantee NUL termination whatever the Guest sent. */
		msg[sizeof(msg)-1] = '\0';
		kill_guest(cpu, "CRASH: %s", msg);
		/* A restart request turns the "dead" marker into -ERESTART. */
		if (args->arg2 == LGUEST_SHUTDOWN_RESTART)
			cpu->lg->dead = ERR_PTR(-ERESTART);
		break;
	}
	case LHCALL_FLUSH_TLB:
		/* Non-zero argument: drop everything; zero: userspace only. */
		if (args->arg1)
			guest_pagetable_clear_all(cpu);
		else
			guest_pagetable_flush_user(cpu);
		break;
	/* These simply pass the arguments through to the right routines. */
	case LHCALL_NEW_PGTABLE:
		guest_new_pagetable(cpu, args->arg1);
		break;
	case LHCALL_SET_STACK:
		guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
		break;
	case LHCALL_SET_PTE:
#ifdef CONFIG_X86_PAE
		/*
		 * Under PAE a PTE is 64 bits: arg3 carries the low word and
		 * arg4 the high word.
		 */
		guest_set_pte(cpu, args->arg1, args->arg2,
			      __pte(args->arg3 | (u64)args->arg4 << 32));
#else
		guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
#endif
		break;
	case LHCALL_SET_PGD:
		guest_set_pgd(cpu->lg, args->arg1, args->arg2);
		break;
#ifdef CONFIG_X86_PAE
	case LHCALL_SET_PMD:
		/* The middle page-table level only exists under PAE. */
		guest_set_pmd(cpu->lg, args->arg1, args->arg2);
		break;
#endif
	case LHCALL_SET_CLOCKEVENT:
		guest_set_clockevent(cpu, args->arg1);
		break;
	case LHCALL_TS:
		/* This sets the TS flag (see run_guest()). */
		cpu->ts = args->arg1;
		break;
	case LHCALL_HALT:
		/* Similarly, this sets the halted flag for run_guest(). */
		cpu->halted = 1;
		break;
	case LHCALL_NOTIFY:
		cpu->pending_notify = args->arg1;
		break;
	default:
		/* It should be an architecture-specific hypercall. */
		if (lguest_arch_do_hcall(cpu, args))
			kill_guest(cpu, "Bad hypercall %li\n", args->arg0);
	}
}