Example #1
void
sys$populate_init_objects(struct vms$meminfo *mem_info, vms$pointer pagesize)
{
    extern struct pd    freevms_pd;

    struct initial_obj  *obj;
    struct memsection   *ret;

    unsigned int        i;

    vms$pointer         base;

    obj = mem_info->objects;

    for(i = 0; i < mem_info->num_objects; i++, obj++)
    {
        if (obj->flags & VMS$IOF_VIRT)
        {
            base = sys$page_round_down(obj->base, pagesize);
            ret = sys$pd_create_memsection(&freevms_pd, obj->end - base,
                    base, VMS$MEM_INTERNAL, pagesize);

            PANIC(ret == NULL);
            // Verify that the new memsection was entered into the object table
            PANIC(sys$objtable_lookup((void*) obj->base) == 0);
        }
    }

    return;
}
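For reference, none of the snippets show the rounding helpers themselves. A minimal sketch, assuming pagesize is a power of two (a common mask idiom, not code taken from the FreeVMS sources):

static inline vms$pointer
sys$page_round_down(vms$pointer addr, vms$pointer pagesize)
{
    /* Clear the low-order bits; valid only for power-of-two page sizes. */
    return addr & ~(pagesize - 1);
}

static inline vms$pointer
sys$page_round_up(vms$pointer addr, vms$pointer pagesize)
{
    /* Advance to the next boundary; aligned addresses are left unchanged. */
    return (addr + pagesize - 1) & ~(pagesize - 1);
}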
Example #2
void map_stack(int asid, md_addr_t sp, md_addr_t bos)
{
    std::lock_guard<XIOSIM_LOCK> l(memory_lock);
    assert(sp != 0);
    assert(bos != 0);

    /* Create local pages for stack */
    md_addr_t page_start = page_round_down(sp);
    md_addr_t page_end = page_round_up(bos);

    mem_newmap(asid, page_start, page_end - page_start);
}
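The XIOSIM helpers take no page-size argument. A plausible shape for them, assuming a power-of-two PAGE_SIZE constant (hypothetical, not checked against the xiosim sources):

static inline md_addr_t page_round_down(md_addr_t addr)
{
    return addr & ~(md_addr_t)(PAGE_SIZE - 1);
}

static inline md_addr_t page_round_up(md_addr_t addr)
{
    return (addr + PAGE_SIZE - 1) & ~(md_addr_t)(PAGE_SIZE - 1);
}

Rounding sp down and bos up widens the range outward, so the mapping covers both endpoints even when neither is page aligned.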
Example #3
void notify_mmap(int asid, md_addr_t addr, size_t length, bool mod_brk)
{
    std::lock_guard<XIOSIM_LOCK> l(memory_lock);
    md_addr_t page_addr = page_round_down(addr);
    size_t page_length = page_round_up(length);

    mem_newmap(asid, page_addr, page_length);

    md_addr_t curr_brk = get_brk(asid);
    if(mod_brk && page_addr > curr_brk)
        set_brk(asid, page_addr + page_length);
}
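One subtlety: the length is rounded up independently of the address, so if addr is not page aligned the mapping can stop one page short of the region's true end. A worked trace, assuming 4 KiB pages (values hypothetical):

/* addr = 0x1800, length = 0x1000: the region [0x1800, 0x2800) touches
 * pages 0x1000 and 0x2000, but the code maps only [0x1000, 0x2000):
 *   page_addr   = page_round_down(0x1800) = 0x1000
 *   page_length = page_round_up(0x1000)   = 0x1000
 * A variant that always covers the whole region would be
 *   page_length = page_round_up(addr + length) - page_addr;
 * mmap() normally returns page-aligned addresses, which is presumably
 * why the simpler form suffices here. */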
Example #4
static struct block *block_find(const void *ptr) 
{
	struct block *block;

	LOG(("block_find; ptr=0x%x\n", ptr));
	assert(ptr);

	/* locate block based on pointer, then check whether it is valid */
	block = (struct block *) page_round_down(
		(unsigned long) ((struct block *) __UNCONST(ptr) - 1));
	block_check(block);
	LOG(("block_find; block=0x%x\n", block));
	return block;
}
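Stepping back one struct block before rounding means a pointer that sits exactly on a page boundary resolves to the previous page rather than its own. A worked trace, assuming 4 KiB pages and sizeof(struct block) == 32 (both values hypothetical):

/* ptr                       = 0x20b000 (exactly on a page boundary)
 * (struct block *) ptr - 1  = 0x20afe0 (one header size back)
 * page_round_down(0x20afe0) = 0x20a000 (candidate page for the header)
 * block_check() then verifies that the candidate really is a block. */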
Example #5
void notify_write(int asid, md_addr_t addr)
{
    std::lock_guard<XIOSIM_LOCK> l(memory_lock);
    if (!mem_is_mapped(asid, addr))
        mem_newmap(asid, page_round_down(addr), PAGE_SIZE);
}
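This is demand mapping at page granularity: only the first write that touches an unmapped page allocates backing for it. A usage sketch (addresses hypothetical, PAGE_SIZE assumed to be 4 KiB):

notify_write(asid, 0x7f001234); /* page 0x7f001000 unmapped: mem_newmap() runs */
notify_write(asid, 0x7f001ff8); /* same page: mem_is_mapped() short-circuits   */
notify_write(asid, 0x7f002000); /* next page: maps a second PAGE_SIZE chunk    */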
Example #6
void
sys$bootstrap(struct vms$meminfo *mem_info, vms$pointer pagesize)
{
    struct memsection       *heap;

    unsigned int            i;

    vms$pointer             base;
    vms$pointer             end;

    notice(SYSBOOT_I_SYSBOOT "reserving memory for preloaded objects\n");

    // Initialization
    pm_alloc.internal.base = 0;
    pm_alloc.internal.end = 0;
    pm_alloc.internal.active = 0;

    vm_alloc.internal.base = 0;
    vm_alloc.internal.end = 0;
    vm_alloc.internal.active = 0;

    for(i = 0; i <= MAX_FPAGE_ORDER; i++)
    {
        TAILQ_INIT(&vm_alloc.flist[i]);
        TAILQ_INIT(&pm_alloc.flist[i]);
    }

    // Bootimage objects are removed from free virtual memory.
    for(i = 0; i < mem_info->num_objects; i++)
    {
        if (mem_info->objects[i].flags & VMS$IOF_VIRT)
        {
            notice(MEM_I_ALLOC "allocating $%016lX - $%016lX\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
            sys$remove_virtmem(mem_info, mem_info->objects[i].base,
                    mem_info->objects[i].end, pagesize);
        }
    }

    // Free up some virtual memory to bootstrap the fpage allocator.
    for(i = 0; i < mem_info->num_vm_regions; i++)
    {
        base = sys$page_round_up(mem_info->vm_regions[i].base, pagesize);
        end = sys$page_round_down(mem_info->vm_regions[i].end + 1, pagesize)
            - 1;

        if (((end - base) + 1) >= (2 * pagesize))
        {
            notice(MEM_I_FALLOC "bootstrapping Fpage allocator at virtual "
                    "addresses\n");
            notice(MEM_I_FALLOC "$%016lX - $%016lX\n", base, end);
            sys$fpage_free_internal(&vm_alloc, base, end);
            mem_info->vm_regions[i].end = mem_info->vm_regions[i].base;
            break;
        }
    }

    PANIC(i >= mem_info->num_vm_regions);

    // We need to make sure the first chunk of physical memory we free
    // is at least 2 * pagesize to bootstrap the slab allocators for
    // memsections and the fpage lists.

    for(i = 0; i < mem_info->num_regions; i++)
    {
        base = sys$page_round_up(mem_info->regions[i].base, pagesize);
        end = sys$page_round_down(mem_info->regions[i].end + 1, pagesize) - 1;

        if (((end - base) + 1) >= (2 * pagesize))
        {
            notice(MEM_I_SALLOC "bootstrapping Slab allocator at physical "
                    "addresses\n");
            notice(MEM_I_SALLOC "$%016lX - $%016lX\n", base, end);
            sys$fpage_free_chunk(&pm_alloc, base, end);
            mem_info->regions[i].end = mem_info->regions[i].base;
            break;
        }
    }

    PANIC(i >= mem_info->num_regions);

    // Base and end may not be page aligned, but the free lists require
    // aligned chunks. If the remaining area is smaller than a page, it
    // should not be added to the free list.

    for(i = 0; i < mem_info->num_regions; i++)
    {
        if (mem_info->regions[i].base == mem_info->regions[i].end)
        {
            continue;
        }

        base = sys$page_round_up(mem_info->regions[i].base, pagesize);
        end = sys$page_round_down(mem_info->regions[i].end + 1, pagesize) - 1;

        if (base < end)
        {
            notice(MEM_I_FREE "freeing region $%016lX - $%016lX\n", base, end);
            sys$fpage_free_chunk(&pm_alloc, base, end);
        }
    }

    sys$fpage_clear_internal(&vm_alloc);

    // Initialize VM allocator

    for(i = 0; i < mem_info->num_vm_regions; i++)
    {
        if (mem_info->vm_regions[i].base < mem_info->vm_regions[i].end)
        {
            notice(MEM_I_VALLOC "adding $%016lX - $%016lX to VM allocator\n",
                    mem_info->vm_regions[i].base, mem_info->vm_regions[i].end);
            sys$fpage_free_chunk(&vm_alloc, mem_info->vm_regions[i].base,
                    mem_info->vm_regions[i].end);
        }
    }

    // Setup the kernel heap

    heap = sys$pd_create_memsection((struct pd *) NULL, VMS$HEAP_SIZE, 0,
            VMS$MEM_NORMAL | VMS$MEM_USER, pagesize);

    PANIC(heap == NULL, notice(SYS_F_HEAP "cannot allocate kernel heap\n"));

    sys$alloc_init(heap->base, heap->end);
    return;
}
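Both bootstrap loops round inward (base up, end down) so that only whole pages reach the free lists, discarding any partial page at either edge. A worked trace of the size check, assuming pagesize = 0x1000 (region values hypothetical):

/* region [0x00100800, 0x001057ff]
 * base = sys$page_round_up(0x00100800, 0x1000)       = 0x00101000
 * end  = sys$page_round_down(0x00105800, 0x1000) - 1 = 0x00104fff
 * size = (end - base) + 1 = 0x4000 = 4 pages >= 2 * pagesize,
 * so this region is large enough to bootstrap an allocator. */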
Example #7
void
sys$mem_init(L4_KernelInterfacePage_t *kip, struct vms$meminfo *mem_info,
        vms$pointer pagesize)
{
    static struct initial_obj       static_objects[NUM_MI_OBJECTS];

    static struct memdesc           static_regions[NUM_MI_REGIONS];
    static struct memdesc           static_io_regions[NUM_MI_IOREGIONS];
    static struct memdesc           static_vm_regions[NUM_MI_VMREGIONS];

    unsigned int                    i;

    notice(SYSBOOT_I_SYSBOOT "initializing memory\n");

    mem_info->regions = static_regions;
    mem_info->max_regions = NUM_MI_REGIONS;
    mem_info->num_regions = sys$find_memory_region(kip,
            NUM_MI_REGIONS, VMS$MEM_RAM, VMS$MEM_IO, static_regions);

    mem_info->io_regions = static_io_regions;
    mem_info->max_io_regions = NUM_MI_IOREGIONS;
    mem_info->num_io_regions = sys$find_memory_region(kip,
            NUM_MI_IOREGIONS, VMS$MEM_IO, VMS$MEM_RAM, static_io_regions);

    mem_info->vm_regions = static_vm_regions;
    mem_info->max_vm_regions = NUM_MI_VMREGIONS;
    mem_info->num_vm_regions = sys$find_memory_region(kip,
            NUM_MI_VMREGIONS, VMS$MEM_VM, 0, static_vm_regions);

    // Create a guard page by removing page zero from free virtual memory

    mem_info->num_vm_regions = sys$remove_chunk(mem_info->vm_regions,
            mem_info->num_vm_regions, NUM_MI_VMREGIONS, 0, pagesize - 1);

    mem_info->objects = static_objects;
    mem_info->max_objects = NUM_MI_OBJECTS;
    mem_info->num_objects = sys$find_initial_objects(kip,
            NUM_MI_OBJECTS, static_objects);

    // Remove any initial objects from free physical memory

    for(i = 0; i < mem_info->num_objects; i++)
    {
        if (mem_info->objects[i].flags & VMS$IOF_PHYS)
        {
            mem_info->num_regions = sys$remove_chunk(mem_info->regions,
                    mem_info->num_regions, NUM_MI_REGIONS,
                    sys$page_round_down(mem_info->objects[i].base, pagesize),
                    sys$page_round_up(mem_info->objects[i].end, pagesize) - 1);
        }
    }

    sys$set_flags(mem_info, VMS$IOF_APP, VMS$IOF_VIRT);
    mem_info->swapper_base = 0;

    for(i = 0; i < mem_info->num_regions; i++)
    {
        notice(MEM_I_AREA "$%016lX - $%016lX: physical memory\n",
                mem_info->regions[i].base, mem_info->regions[i].end);

        if (mem_info->swapper_base < mem_info->regions[i].end)
        {
            mem_info->swapper_base = mem_info->regions[i].end + 1;
        }
    }

    for(i = 0; i < mem_info->num_vm_regions; i++)
    {
        notice(MEM_I_AREA "$%016lX - $%016lX: virtual memory\n",
                mem_info->vm_regions[i].base, mem_info->vm_regions[i].end);
    }

    for(i = 0; i < mem_info->num_io_regions; i++)
    {
        notice(MEM_I_AREA "$%016lX - $%016lX: mapped IO\n",
                mem_info->io_regions[i].base, mem_info->io_regions[i].end);
    }

    for(i = 0; i < mem_info->num_objects; i++)
    {
        if (mem_info->objects[i].flags & VMS$IOF_ROOT)
        {
            notice(MEM_I_AREA "$%016lX - $%016lX: kernel\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        }
        else if (mem_info->objects[i].flags & VMS$IOF_RESERVED)
        {
            notice(MEM_I_AREA "$%016lX - $%016lX: reserved by kernel\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        }
        else if (mem_info->objects[i].flags & VMS$IOF_BOOT)
        {
            notice(MEM_I_AREA "$%016lX - $%016lX: boot information\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        }
        else
        {
            notice(MEM_I_AREA "$%016lX - $%016lX: modules\n",
                    mem_info->objects[i].base, mem_info->objects[i].end);
        }
    }

    return;
}
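Note the direction of the rounding when the initial objects are reserved: base is rounded down and end up, the opposite of the inward rounding used for freeing in Example #6, so every page an object touches is withdrawn from the free pool. A worked trace, assuming pagesize = 0x1000 (object values hypothetical):

/* object [0x00203210, 0x00204e7f]
 * sys$page_round_down(0x00203210, 0x1000)   = 0x00203000
 * sys$page_round_up(0x00204e7f, 0x1000) - 1 = 0x00204fff
 * sys$remove_chunk() reserves both pages the object overlaps,
 * including the partially used first and last ones. */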