/*H:435 * And this is us, creating the new page directory. If we really do * allocate a new one (and so the kernel parts are not there), we set * blank_pgdir. */ static unsigned int new_pgdir(struct lg_cpu *cpu, unsigned long gpgdir, int *blank_pgdir) { unsigned int next; #ifdef CONFIG_X86_PAE pmd_t *pmd_table; #endif /* * We pick one entry at random to throw out. Choosing the Least * Recently Used might be better, but this is easy. */ next = random32() % ARRAY_SIZE(cpu->lg->pgdirs); /* If it's never been allocated at all before, try now. */ if (!cpu->lg->pgdirs[next].pgdir) { cpu->lg->pgdirs[next].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); /* If the allocation fails, just keep using the one we have */ if (!cpu->lg->pgdirs[next].pgdir) next = cpu->cpu_pgd; else { #ifdef CONFIG_X86_PAE /* * In PAE mode, allocate a pmd page and populate the * last pgd entry. */ pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL); if (!pmd_table) { free_page((long)cpu->lg->pgdirs[next].pgdir); set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0)); next = cpu->cpu_pgd; } else { set_pgd(cpu->lg->pgdirs[next].pgdir + SWITCHER_PGD_INDEX, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); /* * This is a blank page, so there are no kernel * mappings: caller must map the stack! */ *blank_pgdir = 1; } #else *blank_pgdir = 1; #endif } } /* Record which Guest toplevel this shadows. */ cpu->lg->pgdirs[next].gpgdir = gpgdir; /* Release all the non-kernel mappings. */ flush_user_mappings(cpu->lg, next); return next; }
/*H:435 * And this is us, creating the new page directory. If we really do * allocate a new one (and so the kernel parts are not there), we set * blank_pgdir. */ static unsigned int new_pgdir(struct lg_cpu *cpu, unsigned long gpgdir, int *blank_pgdir) { unsigned int next; /* * We pick one entry at random to throw out. Choosing the Least * Recently Used might be better, but this is easy. */ next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs); /* If it's never been allocated at all before, try now. */ if (!cpu->lg->pgdirs[next].pgdir) { cpu->lg->pgdirs[next].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); /* If the allocation fails, just keep using the one we have */ if (!cpu->lg->pgdirs[next].pgdir) next = cpu->cpu_pgd; else { /* * This is a blank page, so there are no kernel * mappings: caller must map the stack! */ *blank_pgdir = 1; } } /* Record which Guest toplevel this shadows. */ cpu->lg->pgdirs[next].gpgdir = gpgdir; /* Release all the non-kernel mappings. */ flush_user_mappings(cpu->lg, next); /* This hasn't run on any CPU at all. */ cpu->lg->pgdirs[next].last_host_cpu = -1; return next; }
/*H:440
 * (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed.
 */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
	struct lguest *lg = cpu->lg;

	/* Discard the userspace half of this CPU's current shadow toplevel. */
	flush_user_mappings(lg, cpu->cpu_pgd);
}