/*
 * Probe for a tboot (Trusted Boot) shared page at the physical address
 * given via the "tboot=" command line option (opt_tboot).  On success,
 * publishes the page through g_tboot_shared and caches the TXT heap and
 * SINIT base/size register values needed later by
 * tboot_protect_mem_regions() and/or tboot_parse_dmar_table().
 *
 * On any validation failure the function returns silently and
 * g_tboot_shared stays unset, i.e. tboot is treated as absent.
 */
void __init tboot_probe(void)
{
    tboot_shared_t *tboot_shared;
    unsigned long p_tboot_shared;
    uint32_t map_base, map_size;
    unsigned long map_addr;

    /* Look for valid page-aligned address for shared page. */
    p_tboot_shared = simple_strtoul(opt_tboot, NULL, 0);
    if ( (p_tboot_shared == 0) || ((p_tboot_shared & ~PAGE_MASK) != 0) )
        return;

    /* Map and check for tboot UUID. */
    set_fixmap(FIX_TBOOT_SHARED_BASE, p_tboot_shared);
    tboot_shared = (tboot_shared_t *)fix_to_virt(FIX_TBOOT_SHARED_BASE);
    /*
     * NOTE(review): fix_to_virt() of a valid fixmap slot presumably never
     * yields NULL, so this check looks dead -- kept as harmless defense.
     */
    if ( tboot_shared == NULL )
        return;
    if ( memcmp(&tboot_shared_uuid, (uuid_t *)tboot_shared, sizeof(uuid_t)) )
        return;

    /* new tboot_shared (w/ GAS support, integrity, etc.) is not backwards
       compatible */
    if ( tboot_shared->version < 4 )
    {
        printk("unsupported version of tboot (%u)\n", tboot_shared->version);
        return;
    }

    g_tboot_shared = tboot_shared;
    printk("TBOOT: found shared page at phys addr %lx:\n", p_tboot_shared);
    /* version is an unsigned field: use %u, matching the message above. */
    printk(" version: %u\n", tboot_shared->version);
    printk(" log_addr: 0x%08x\n", tboot_shared->log_addr);
    printk(" shutdown_entry: 0x%08x\n", tboot_shared->shutdown_entry);
    printk(" tboot_base: 0x%08x\n", tboot_shared->tboot_base);
    printk(" tboot_size: 0x%x\n", tboot_shared->tboot_size);

    /* these will be needed by tboot_protect_mem_regions() and/or
       tboot_parse_dmar_table(), so get them now */
    map_base = PFN_DOWN(TXT_PUB_CONFIG_REGS_BASE);
    map_size = PFN_UP(NR_TXT_CONFIG_PAGES * PAGE_SIZE);
    /* Temporarily map the TXT public config register space. */
    map_addr = (unsigned long)__va(map_base << PAGE_SHIFT);
    if ( map_pages_to_xen(map_addr, map_base, map_size, __PAGE_HYPERVISOR) )
        return;

    /* TXT Heap */
    txt_heap_base =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_BASE);
    txt_heap_size =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_SIZE);

    /* SINIT */
    sinit_base =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_BASE);
    sinit_size =
        *(uint64_t *)__va(TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_SIZE);

    /* Unmap the config space again now that the values are cached. */
    destroy_xen_mappings((unsigned long)__va(map_base << PAGE_SHIFT),
                         (unsigned long)__va((map_base + map_size) <<
                                             PAGE_SHIFT));
}
/*
 * Undo a __vmap(): remove the translations covering the region starting
 * at va and release its virtual address space back to the allocator.
 */
void vunmap(const void *va)
{
    unsigned long start = (unsigned long)va;

#ifndef _PAGE_NONE
    destroy_xen_mappings(start, start + PAGE_SIZE * vm_size(va));
#else  /* Avoid tearing down intermediate page tables. */
    map_pages_to_xen(start, 0, vm_size(va), _PAGE_NONE);
#endif

    vm_free(va);
}
/*
 * One-time initialisation of the vmap allocator.
 *
 * Carves up the VMAP_VIRT_START .. arch_vmap_virt_end() virtual region and
 * sets up vm_bitmap, which tracks it with one bit per page:
 *   vm_end - total number of pages the region can hold;
 *   vm_low - index of the first page available to vm_alloc() (the pages
 *            below it are reserved for the bitmap itself);
 *   vm_top - number of pages currently covered by *mapped* bitmap storage
 *            (extended on demand later, by vm_alloc()).
 */
void __init vm_init(void)
{
    unsigned int i, nr;
    unsigned long va;

    vm_base = (void *)VMAP_VIRT_START;
    vm_end = PFN_DOWN(arch_vmap_virt_end() - vm_base);
    /* Pages needed to hold vm_end bitmap bits (8 bits per byte). */
    vm_low = PFN_UP((vm_end + 7) / 8);
    /* Pages of bitmap to back right away: enough to cover vm_low bits. */
    nr = PFN_UP((vm_low + 7) / 8);
    vm_top = nr * PAGE_SIZE * 8;

    /* Back the initial bitmap pages with freshly allocated, zeroed memory. */
    for ( i = 0, va = (unsigned long)vm_bitmap; i < nr; ++i, va += PAGE_SIZE )
    {
        /*
         * NOTE(review): alloc_domheap_page() can return NULL; this boot-time
         * path does not check -- presumably a failure here is unrecoverable
         * anyway.  Confirm whether a BUG_ON(!pg) is wanted.
         */
        struct page_info *pg = alloc_domheap_page(NULL, 0);

        map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR);
        clear_page((void *)va);
    }
    /* Mark the pages holding the bitmap itself as allocated. */
    bitmap_fill(vm_bitmap, vm_low);

    /* Populate page tables for the bitmap if necessary. */
    /* (va carries the loop's end address: first byte past the mapped part.) */
    map_pages_to_xen(va, 0, vm_low - nr, MAP_SMALL_PAGES);
}
/*
 * Map an array of nr machine frame numbers into freshly allocated vmap
 * address space, 'granularity' pages per entry, with the given page
 * alignment and mapping flags.  Returns the new virtual address, or NULL
 * on allocation or mapping failure (partially established mappings are
 * torn down again via vunmap()).
 */
void *__vmap(const unsigned long *mfn, unsigned int granularity,
             unsigned int nr, unsigned int align, unsigned int flags)
{
    void *va = vm_alloc(nr * granularity, align);
    unsigned long cur = (unsigned long)va;

    while ( va && nr-- )
    {
        if ( map_pages_to_xen(cur, *mfn, granularity, flags) )
        {
            /* Failure: undo everything mapped so far and bail out. */
            vunmap(va);
            va = NULL;
            break;
        }
        ++mfn;
        cur += PAGE_SIZE * granularity;
    }

    return va;
}
/*
 * Allocate a contiguous run of nr pages of vmap virtual address space,
 * aligned to 'align' pages (0 is treated as 1; a non-power-of-two value
 * is rounded down to its lowest set bit).  Returns the virtual address
 * of the run, or NULL when the address space is exhausted or the bitmap
 * cannot be extended.
 *
 * State lives in vm_bitmap (one bit per page, set = in use), protected
 * by vm_lock.  The first bit of each candidate run is deliberately
 * skipped, leaving the preceding page as an unmapped guard page.  When
 * the scan reaches vm_top without success, the bitmap's backing storage
 * is grown one domheap page at a time and the scan retried.
 */
void *vm_alloc(unsigned int nr, unsigned int align)
{
    unsigned int start, bit;

    if ( !align )
        align = 1;
    else if ( align & (align - 1) )
        align &= -align; /* round down to a power of two */

    spin_lock(&vm_lock);
    for ( ; ; )
    {
        struct page_info *pg;

        ASSERT(vm_low == vm_top || !test_bit(vm_low, vm_bitmap));
        /* Scan for a large-enough run of clear bits, starting at vm_low. */
        for ( start = vm_low; start < vm_top; )
        {
            /* Next allocated page at or after start+1 bounds the free run. */
            bit = find_next_bit(vm_bitmap, vm_top, start + 1);
            if ( bit > vm_top )
                bit = vm_top;
            /*
             * Note that this skips the first bit, making the
             * corresponding page a guard one.
             */
            start = (start + align) & ~(align - 1);
            if ( bit < vm_top )
            {
                if ( start + nr < bit )
                    break; /* run fits (strict '<' keeps a trailing guard) */
                start = find_next_zero_bit(vm_bitmap, vm_top, bit + 1);
            }
            else
            {
                if ( start + nr <= bit )
                    break; /* run fits, flush against the current top */
                start = bit;
            }
        }

        if ( start < vm_top )
            break; /* found a suitable range */

        /* No room under vm_top - try to extend the bitmap's coverage. */
        spin_unlock(&vm_lock);

        if ( vm_top >= vm_end )
            return NULL; /* whole region already covered: truly full */

        /* Allocate the new bitmap backing page without holding the lock. */
        pg = alloc_domheap_page(NULL, 0);
        if ( !pg )
            return NULL;

        spin_lock(&vm_lock);

        /* Re-check: another CPU may have extended vm_top meanwhile. */
        if ( start >= vm_top )
        {
            unsigned long va = (unsigned long)vm_bitmap + vm_top / 8;

            if ( !map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR) )
            {
                clear_page((void *)va);
                vm_top += PAGE_SIZE * 8;
                if ( vm_top > vm_end )
                    vm_top = vm_end;
                continue; /* retry the scan with the enlarged bitmap */
            }
        }

        /* Lost the race, or mapping failed: the page is not needed. */
        free_domheap_page(pg);

        if ( start >= vm_top )
        {
            /* Mapping failed and nobody else extended: give up. */
            spin_unlock(&vm_lock);
            return NULL;
        }
        /* Someone else extended vm_top: loop and rescan. */
    }

    /* Claim the chosen run. */
    for ( bit = start; bit < start + nr; ++bit )
        __set_bit(bit, vm_bitmap);
    if ( bit < vm_top )
        ASSERT(!test_bit(bit, vm_bitmap));
    else
        ASSERT(bit == vm_top);
    /* Keep vm_low past the (nearly) contiguous allocated prefix. */
    if ( start <= vm_low + 2 )
        vm_low = bit;
    spin_unlock(&vm_lock);

    return vm_base + start * PAGE_SIZE;
}