Example #1
static void mem_delmap(int asid, md_addr_t addr, size_t length)
{
    ZTRACE_PRINT(INVALID_CORE, "mem_delmap: %d, %" PRIxPTR", length: %zd\n", asid, addr, length);

    assert(asid >= 0 && asid < num_address_spaces);

    /* Check alignment */
    if (page_offset(addr)) {
        fprintf(stderr, "mem_delmap: Address %" PRIxPTR" not aligned\n", addr);
        abort();
    }

    /* Remove every page in the range from page table */
    md_addr_t last_addr = page_round_up(addr + length);
    for (md_addr_t curr_addr = addr; (curr_addr <= last_addr) && curr_addr; curr_addr += PAGE_SIZE) {
        if (!mem_is_mapped(asid, curr_addr))
            continue; /* Attempting to remove something missing is ok */

        md_addr_t curr_vpn = curr_addr >> PAGE_SHIFT;
        page_tables[asid].erase(curr_vpn);

        page_count[asid]--;
        phys_page_count--;
    }
}
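
Note: Examples #1 through #7 rely on page-rounding helpers (PAGE_SIZE, PAGE_SHIFT, page_offset, page_round_up, page_round_down) that are defined elsewhere in the project and not shown here. Below is a minimal sketch of what such helpers could look like, assuming a power-of-two 4 KiB page size; the names mirror the calls in the examples, but the project's real definitions may differ.

#include <stdint.h>

/* Hypothetical definitions for illustration only. */
typedef uint64_t md_addr_t;

#define PAGE_SHIFT 12                            /* assumed 4 KiB pages */
#define PAGE_SIZE  ((md_addr_t)1 << PAGE_SHIFT)

/* Offset of an address within its page; non-zero means "not page-aligned". */
static inline md_addr_t page_offset(md_addr_t addr) {
    return addr & (PAGE_SIZE - 1);
}

/* Round an address down to the start of its page. */
static inline md_addr_t page_round_down(md_addr_t addr) {
    return addr & ~(PAGE_SIZE - 1);
}

/* Round an address up to the next page boundary (identity if already aligned). */
static inline md_addr_t page_round_up(md_addr_t addr) {
    return (addr + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}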
Example #2
/* We allocate physical pages to virtual pages on a
 * first-come-first-serve basis. Seems like linux frowns upon
 * page coloring, so should be reasonably accurate. */
static md_paddr_t next_ppn_to_allocate = 0x00000100; /* arbitrary starting point; */
static void mem_newmap(int asid, md_addr_t addr, size_t length)
{
    ZTRACE_PRINT(INVALID_CORE, "mem_newmap: %d, %" PRIxPTR", length: %zd\n", asid, addr, length);

    assert(asid >= 0 && asid < num_address_spaces);
    assert(addr != 0); // Mapping 0-th page might cause hell to break loose, don't do it.

    /* Check alignment */
    if (page_offset(addr)) {
        fprintf(stderr, "mem_newmap: Address %" PRIxPTR" not aligned\n", addr);
        abort();
    }

    /* Add every page in the range to page table */
    md_addr_t last_addr = page_round_up(addr + length);
    for (md_addr_t curr_addr = addr; (curr_addr <= last_addr) && curr_addr; curr_addr += PAGE_SIZE) {
        if (mem_is_mapped(asid, curr_addr))
            continue; /* Attempting to double-map is ok */

        md_addr_t curr_vpn = curr_addr >> PAGE_SHIFT;
        page_tables[asid][curr_vpn] = next_ppn_to_allocate;

        next_ppn_to_allocate++;

        page_count[asid]++;
        phys_page_count++;
    }
}
Example #3
void map_stack(int asid, md_addr_t sp, md_addr_t bos)
{
    std::lock_guard<XIOSIM_LOCK> l(memory_lock);
    assert(sp != 0);
    assert(bos != 0);

    /* Create local pages for stack */
    md_addr_t page_start = page_round_down(sp);
    md_addr_t page_end = page_round_up(bos);

    mem_newmap(asid, page_start, page_end - page_start);
}
Example #4
void update_brk(int asid, md_addr_t brk_end, bool do_mmap)
{
    assert(brk_end != 0);

    if(do_mmap)
    {
        md_addr_t old_brk_end = get_brk(asid);

        if(brk_end > old_brk_end)
            notify_mmap(asid, page_round_up(old_brk_end),
                        page_round_up(brk_end - old_brk_end), false);
        else if(brk_end < old_brk_end)
            notify_munmap(asid, page_round_up(brk_end),
                          page_round_up(old_brk_end - brk_end), false);
    }

    {
        std::lock_guard<XIOSIM_LOCK> l(memory_lock);
        set_brk(asid, brk_end);
    }
}
Example #5
void notify_mmap(int asid, md_addr_t addr, size_t length, bool mod_brk)
{
    std::lock_guard<XIOSIM_LOCK> l(memory_lock);
    md_addr_t page_addr = page_round_down(addr);
    size_t page_length = page_round_up(length);

    mem_newmap(asid, page_addr, page_length);

    md_addr_t curr_brk = get_brk(asid);
    if(mod_brk && page_addr > curr_brk)
        set_brk(asid, page_addr + page_length);
}
Example #6
static size_t block_get_totalsize(size_t size)
{
	return page_round_up(sizeof(struct block) + size);
}
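
A quick sanity check of the rounding (assuming 4 KiB pages and, hypothetically, sizeof(struct block) == 32): block_get_totalsize(100) rounds 132 bytes up to 4096, and block_get_totalsize(4096) rounds 4128 up to 8192, so every block occupies a whole number of pages, header included.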
Example #7
void notify_munmap(int asid, md_addr_t addr, size_t length, bool mod_brk)
{
    std::lock_guard<XIOSIM_LOCK> l(memory_lock);
    mem_delmap(asid, page_round_up(addr), length);
}
Example #8
void
sys$bootstrap(struct vms$meminfo *mem_info, vms$pointer pagesize)
{
    struct memsection       *heap;

    unsigned int            i;

    vms$pointer             base;
    vms$pointer             end;

    notice(SYSBOOT_I_SYSBOOT "reserving memory for preloaded objects\n");

    // Initialization
    pm_alloc.internal.base = 0;
    pm_alloc.internal.end = 0;
    pm_alloc.internal.active = 0;

    vm_alloc.internal.base = 0;
    vm_alloc.internal.end = 0;
    vm_alloc.internal.active = 0;

    for(i = 0; i <= MAX_FPAGE_ORDER; i++)
    {
        TAILQ_INIT(&vm_alloc.flist[i]);
        TAILQ_INIT(&pm_alloc.flist[i]);
    }

    // Bootimage objects are removed from free virtual memory.
    for(i = 0; i < mem_info->num_objects; i++)
    {
        if (mem_info->objects[i].flags & VMS$IOF_VIRT)
        {
            notice(MEM_I_ALLOC "allocating $%016lX - $%016lX\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
            sys$remove_virtmem(mem_info, mem_info->objects[i].base,
                    mem_info->objects[i].end, pagesize);
        }
    }

    // Free up some virtual memory to bootstrap the fpage allocator.
    for(i = 0; i < mem_info->num_vm_regions; i++)
    {
        base = sys$page_round_up(mem_info->vm_regions[i].base, pagesize);
        end = sys$page_round_down(mem_info->vm_regions[i].end + 1, pagesize)
            - 1;

        if (((end - base) + 1) >= (2 * pagesize))
        {
            notice(MEM_I_FALLOC "bootstrapping Fpage allocator at virtual "
                    "addresses\n");
            notice(MEM_I_FALLOC "$%016lX - $%016lX\n", base, end);
            sys$fpage_free_internal(&vm_alloc, base, end);
            mem_info->vm_regions[i].end = mem_info->vm_regions[i].base;
            break;
        }
    }

    PANIC(i >= mem_info->num_vm_regions);

    // We need to make sure the first chunk of physical memory we free
    // is at least 2 * pagesize to bootstrap the slab allocators for
    // memsections and the fpage lists.

    for(i = 0; i < mem_info->num_regions; i++)
    {
        base = sys$page_round_up(mem_info->regions[i].base, pagesize);
        end = sys$page_round_down(mem_info->regions[i].end + 1, pagesize) - 1;

        if (((end - base) + 1) >= (2 * pagesize))
        {
            notice(MEM_I_SALLOC "bootstrapping Slab allocator at physical "
                    "addresses\n");
            notice(MEM_I_SALLOC "$%016lX - $%016lX\n", base, end);
            sys$fpage_free_chunk(&pm_alloc, base, end);
            mem_info->regions[i].end = mem_info->regions[i].base;
            break;
        }
    }

    PANIC(i >= mem_info->num_regions);

    // Base and end may not be page-aligned, so align them here. If the
    // resulting area is smaller than a page, do not add it to the free list.

    for(i = 0; i < mem_info->num_regions; i++)
    {
        if (mem_info->regions[i].base == mem_info->regions[i].end)
        {
            continue;
        }

        base = sys$page_round_up(mem_info->regions[i].base, pagesize);
        end = sys$page_round_down(mem_info->regions[i].end + 1, pagesize) - 1;

        if (base < end)
        {
            notice(MEM_I_FREE "freeing region $%016lX - $%016lX\n", base, end);
            sys$fpage_free_chunk(&pm_alloc, base, end);
        }
    }

    sys$fpage_clear_internal(&vm_alloc);

    // Initialize VM allocator

    for(i = 0; i < mem_info->num_vm_regions; i++)
    {
        if (mem_info->vm_regions[i].base < mem_info->vm_regions[i].end)
        {
            notice(MEM_I_VALLOC "adding $%016lX - $%016lX to VM allocator\n",
                    mem_info->vm_regions[i].base, mem_info->vm_regions[i].end);
            sys$fpage_free_chunk(&vm_alloc, mem_info->vm_regions[i].base,
                    mem_info->vm_regions[i].end);
        }
    }

    // Set up the kernel heap

    heap = sys$pd_create_memsection((struct pd *) NULL, VMS$HEAP_SIZE, 0,
            VMS$MEM_NORMAL | VMS$MEM_USER, pagesize);

    PANIC(heap == NULL, notice(SYS_F_HEAP "cannot allocate kernel heap\n"));

    sys$alloc_init(heap->base, heap->end);
    return;
}
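
Note: Examples #8 and #9 call sys$page_round_up and sys$page_round_down, which take the page size as an explicit argument rather than using a compile-time constant. A minimal sketch of these helpers, assuming pagesize is a power of two and vms$pointer is an unsigned integer type; the project's own definitions may differ.

/* Hypothetical helpers for illustration; not the project's actual code. */
static inline vms$pointer
sys$page_round_down(vms$pointer addr, vms$pointer pagesize)
{
    return addr & ~(pagesize - 1);               /* clear the in-page offset bits */
}

static inline vms$pointer
sys$page_round_up(vms$pointer addr, vms$pointer pagesize)
{
    return (addr + pagesize - 1) & ~(pagesize - 1);
}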
Example #9
void
sys$mem_init(L4_KernelInterfacePage_t *kip, struct vms$meminfo *mem_info,
        vms$pointer pagesize)
{
    static struct initial_obj       static_objects[NUM_MI_OBJECTS];

    static struct memdesc           static_regions[NUM_MI_REGIONS];
    static struct memdesc           static_io_regions[NUM_MI_IOREGIONS];
    static struct memdesc           static_vm_regions[NUM_MI_VMREGIONS];

    unsigned int                    i;

    notice(SYSBOOT_I_SYSBOOT "initializing memory\n");

    mem_info->regions = static_regions;
    mem_info->max_regions = NUM_MI_REGIONS;
    mem_info->num_regions = sys$find_memory_region(kip,
            NUM_MI_REGIONS, VMS$MEM_RAM, VMS$MEM_IO, static_regions);

    mem_info->io_regions = static_io_regions;
    mem_info->max_io_regions = NUM_MI_IOREGIONS;
    mem_info->num_io_regions = sys$find_memory_region(kip,
            NUM_MI_IOREGIONS, VMS$MEM_IO, VMS$MEM_RAM, static_io_regions);

    mem_info->vm_regions = static_vm_regions;
    mem_info->max_vm_regions = NUM_MI_VMREGIONS;
    mem_info->num_vm_regions = sys$find_memory_region(kip,
            NUM_MI_VMREGIONS, VMS$MEM_VM, 0, static_vm_regions);

    // Create a guard page

    mem_info->num_vm_regions = sys$remove_chunk(mem_info->vm_regions,
            mem_info->num_vm_regions, NUM_MI_VMREGIONS, 0, pagesize - 1);

    mem_info->objects = static_objects;
    mem_info->max_objects = NUM_MI_OBJECTS;
    mem_info->num_objects = sys$find_initial_objects(kip,
            NUM_MI_OBJECTS, static_objects);

    // Remove any initial objects from free physical memory

    for(i = 0; i < mem_info->num_objects; i++)
    {
        if (mem_info->objects[i].flags & VMS$IOF_PHYS)
        {
            mem_info->num_regions = sys$remove_chunk(mem_info->regions,
                    mem_info->num_regions, NUM_MI_REGIONS,
                    sys$page_round_down(mem_info->objects[i].base, pagesize),
                    sys$page_round_up(mem_info->objects[i].end, pagesize) - 1);
        }
    }

    sys$set_flags(mem_info, VMS$IOF_APP, VMS$IOF_VIRT);
    mem_info->swapper_base = 0;

    for(i = 0; i < mem_info->num_regions; i++)
    {
        notice(MEM_I_AREA "$%016lX - $%016lX: physical memory\n",
                mem_info->regions[i].base, mem_info->regions[i].end);

        if (mem_info->swapper_base < mem_info->regions[i].end)
        {
            mem_info->swapper_base = mem_info->regions[i].end + 1;
        }
    }

    for(i = 0; i < mem_info->num_vm_regions; i++)
    {
        notice(MEM_I_AREA "$%016lX - $%016lX: virtual memory\n",
                mem_info->vm_regions[i].base, mem_info->vm_regions[i].end);
    }

    for(i = 0; i < mem_info->num_io_regions; i++)
    {
        notice(MEM_I_AREA "$%016lX - $%016lX: mapped IO\n",
                mem_info->io_regions[i].base, mem_info->io_regions[i].end);
    }

    for(i = 0; i < mem_info->num_objects; i++)
    {
        if (mem_info->objects[i].flags & VMS$IOF_ROOT)
        {
            notice(MEM_I_AREA "$%016lX - $%016lX: kernel\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        }
        else if (mem_info->objects[i].flags & VMS$IOF_RESERVED)
        {
            notice(MEM_I_AREA "$%016lX - $%016lX: reserved by kernel\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        }
        else if (mem_info->objects[i].flags & VMS$IOF_BOOT)
        {
            notice(MEM_I_AREA "$%016lX - $%016lX: boot information\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        }
        else
        {
            notice(MEM_I_AREA "$%016lX - $%016lX: modules\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        }
    }

    return;
}