Example #1
/*===========================================================================*
 *				pt_ptalloc		     		     *
 *===========================================================================*/
PRIVATE int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
{
    /* Allocate a page table and write its address into the page directory. */
    int i;
    u32_t pt_phys;

    /* Argument must make sense. */
    vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
    vm_assert(!(flags & ~(PTF_ALLFLAGS)));

    /* We don't expect to overwrite a page directory entry, nor
     * the storage for the page table.
     */
    vm_assert(!(pt->pt_dir[pde] & I386_VM_PRESENT));
    vm_assert(!pt->pt_pt[pde]);

    /* Get storage for the page table. */
    if(!(pt->pt_pt[pde] = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
        return ENOMEM;

    for(i = 0; i < I386_VM_PT_ENTRIES; i++)
        pt->pt_pt[pde][i] = 0;	/* Empty entry. */

    /* Make page directory entry.
     * The PDE is always 'present,' 'writable,' and 'user accessible,'
     * relying on the PTE for protection.
     */
    pt->pt_dir[pde] = (pt_phys & I386_VM_ADDR_MASK) | flags
                      | I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

    return OK;
}
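Note: in these MINIX examples, vm_assert() is a sanity-check macro that panics the VM server when its condition fails, and it compiles away in non-checked builds. A minimal sketch of such a macro (an assumption for illustration, not the actual MINIX definition):

#if SANITYCHECKS
/* Hypothetical sketch: panic with file/line context when a check fails. */
#define vm_assert(cond) do {						\
	if(!(cond)) {							\
		printf("VM: %s:%d: assert failed: %s\n",		\
			__FILE__, __LINE__, #cond);			\
		vm_panic("assert failed", NO_NUM);			\
	}								\
} while(0)
#else
#define vm_assert(cond)		/* compiled out */
#endif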
Example #2
struct slabdata *newslabdata(int list)
{
	struct slabdata *n;
	phys_bytes p;

	vm_assert(sizeof(*n) == VM_PAGE_SIZE);

	if(!(n = vm_allocpage(&p, VMP_SLAB))) {
		printk("newslabdata: vm_allocpage failed\n");
		return NULL;
	}
	memset(n->sdh.usebits, 0, sizeof(n->sdh.usebits));
	pages++;

	n->sdh.phys = p;
#if SANITYCHECKS
	n->sdh.magic1 = MAGIC1;
	n->sdh.magic2 = MAGIC2;
#endif
	n->sdh.nused = 0;
	n->sdh.freeguess = 0;
	n->sdh.list = list;

#if SANITYCHECKS
	n->sdh.writable = WRITABLE_HEADER;
	SLABDATAUNWRITABLE(n);
#endif

	return n;
}
Example #3
/*===========================================================================*
 *				vm_pagelock		     		     *
 *===========================================================================*/
PUBLIC void vm_pagelock(void *vir, int lockflag)
{
    /* Mark a page allocated by vm_allocpage() unwritable (lockflag set),
     * i.e. accessible only to VM, or writable again (lockflag clear).
     */
    vir_bytes m;
    int r;
    u32_t flags = I386_VM_PRESENT | I386_VM_USER;
    pt_t *pt;

    pt = &vmp->vm_pt;
    m = arch_vir2map(vmp, (vir_bytes) vir);

    vm_assert(!(m % I386_PAGE_SIZE));

    if(!lockflag)
        flags |= I386_VM_WRITE;

    /* Update flags. */
    if((r=pt_writemap(pt, m, 0, I386_PAGE_SIZE,
                      flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
        vm_panic("vm_lockpage: pt_writemap failed\n", NO_NUM);
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        vm_panic("VMCTL_FLUSHTLB failed", r);
    }

    return;
}
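Example #2's SLABDATAUNWRITABLE() is plausibly a thin wrapper over this function. A sketch of how such wrappers could be defined (the macro bodies here are assumptions, not the project's actual definitions):

/* Hypothetical wrappers mapping slab-header protection onto vm_pagelock(). */
#define SLABDATAWRITABLE(n)	vm_pagelock((n), 0)	/* allow writes */
#define SLABDATAUNWRITABLE(n)	vm_pagelock((n), 1)	/* write-protect */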
Example #4
/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
PRIVATE void vm_freepages(vir_bytes vir, vir_bytes phys, int pages, int reason)
{
    vm_assert(reason >= 0 && reason < VMP_CATEGORIES);
    if(vir >= vmp->vm_stacktop) {
        vm_assert(!(vir % I386_PAGE_SIZE));
        vm_assert(!(phys % I386_PAGE_SIZE));
        FREE_MEM(ABS2CLICK(phys), pages);
        if(pt_writemap(&vmp->vm_pt, arch_vir2map(vmp, vir),
                       MAP_NONE, pages*I386_PAGE_SIZE, 0, WMF_OVERWRITE) != OK)
            vm_panic("vm_freepages: pt_writemap failed",
                     NO_NUM);
    } else {
        printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
               pages);
    }
}
Example #5
/*===========================================================================*
 *				vm_getsparepage		     		     *
 *===========================================================================*/
PRIVATE void *vm_getsparepage(u32_t *phys)
{
    int s;
    vm_assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
    for(s = 0; s < SPAREPAGES; s++) {
        if(sparepages[s].page) {
            void *sp;
            sp = sparepages[s].page;
            *phys = sparepages[s].phys;
            sparepages[s].page = NULL;
            missing_spares++;
            vm_assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
            return sp;
        }
    }
    return NULL;
}
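The spare-page bookkeeping that vm_getsparepage() and vm_checkspares() (Example #11) manipulate can be inferred from the field accesses. A sketch under those assumptions (the real declarations live elsewhere in the VM server):

/* Inferred from usage in vm_getsparepage()/vm_checkspares(). */
static struct {
	void	*page;	/* mapped address of the spare page, NULL if taken */
	u32_t	phys;	/* its physical address */
} sparepages[SPAREPAGES];

static int missing_spares;	/* number of empty slots in sparepages[] */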
Example #6
/*===========================================================================*
 *				pt_bind			     		     *
 *===========================================================================*/
PUBLIC int pt_bind(pt_t *pt, struct vmproc *who)
{
    int slot;
    u32_t phys;

    /* Basic sanity checks. */
    vm_assert(who);
    vm_assert(who->vm_flags & VMF_INUSE);
    vm_assert(pt);

    slot = who->vm_slot;
    vm_assert(slot >= 0);
    vm_assert(slot < ELEMENTS(vmproc));
    vm_assert(slot < I386_VM_PT_ENTRIES);

    phys = pt->pt_dir_phys & I386_VM_ADDR_MASK;
    vm_assert(pt->pt_dir_phys == phys);

    /* Update "page directory pagetable." */
    page_directories[slot] = phys | I386_VM_PRESENT|I386_VM_WRITE;

#if 0
    printf("VM: slot %d has pde val 0x%lx\n", slot, page_directories[slot]);
#endif
    /* Tell kernel about new page table root. */
    return sys_vmctl(who->vm_endpoint, VMCTL_I386_SETCR3,
                     pt ? pt->pt_dir_phys : 0);
}
Example #7
File: vm.c Project: smorimura/filagree
static void push_var(struct context *context, struct byte_array *program)
{
    struct byte_array* name = serial_decode_string(program);
    VM_DEBUGPRINT("VAR %s\n", byte_array_to_string(name));
    struct variable *v = find_var(context, name);
    if (!v)
        DEBUGPRINT("variable %s not found\n", byte_array_to_string(name));
    vm_assert(context, v, "variable %s not found", byte_array_to_string(name));
    variable_push(context, v);
}
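Note that filagree's vm_assert() differs from the MINIX macro: it takes the interpreter context, a condition, and a printf-style message. A sketch of its shape inferred from the call sites (not the project's actual code):

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

/* Sketch: report a fatal error through the context when a check fails. */
void vm_assert(struct context *context, bool assertion, const char *format, ...)
{
    if (assertion)
        return;

    char message[256];
    va_list args;
    va_start(args, format);
    vsnprintf(message, sizeof(message), format, args);
    va_end(args);

    /* vm_exit_message() is the fatal-error path used elsewhere in these
     * examples (see binary_op_lst and binary_op_nil). */
    vm_exit_message(context, message);
}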
Example #8
File: vm.c Project: boostsup/minix3
/*===========================================================================*
 *				arch_map2vir				     *
 *===========================================================================*/
PUBLIC vir_bytes arch_map2vir(struct vmproc *vmp, vir_bytes addr)
{
	vir_bytes textstart = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
	vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);

	/* Could be a text address. */
	vm_assert(datastart <= addr || textstart <= addr);

	return addr - datastart;
}
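arch_map2vir() translates a mapped (page-table) address back to a process-view address by subtracting the data segment base; its inverse arch_vir2map(), used in Examples #3, #9, and #18, would then just add it back. A sketch under that assumption:

/* Sketch: assumed inverse of arch_map2vir() above. */
PUBLIC vir_bytes arch_vir2map(struct vmproc *vmp, vir_bytes addr)
{
	vir_bytes datastart = CLICK2ABS(vmp->vm_arch.vm_seg[D].mem_phys);

	return addr + datastart;
}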
Example #9
/*===========================================================================*
 *				do_mmap			     		     *
 *===========================================================================*/
int do_mmap(kipc_msg_t *m)
{
	int r, n;
	struct vmproc *vmp;
	int mfflags = 0;
	struct vir_region *vr = NULL;

	if((r=vm_isokendpt(m->m_source, &n)) != 0) {
		vm_panic("do_mmap: message from strange source", m->m_source);
	}

	vmp = &vmproc[n];

	if(!(vmp->vm_flags & VMF_HASPT))
		return -ENXIO;

	if(m->VMM_FD == -1 || (m->VMM_FLAGS & MAP_ANONYMOUS)) {
		u32_t vrflags = VR_ANON | VR_WRITABLE;
		size_t len = (vir_bytes) m->VMM_LEN;

		if(m->VMM_FD != -1) {
			return -EINVAL;
		}

		if(m->VMM_FLAGS & MAP_CONTIG) mfflags |= MF_CONTIG;
		if(m->VMM_FLAGS & MAP_PREALLOC) mfflags |= MF_PREALLOC;
		if(m->VMM_FLAGS & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
		if(m->VMM_FLAGS & MAP_LOWER1M)  vrflags |= VR_LOWER1MB;
		if(m->VMM_FLAGS & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
		if(m->VMM_FLAGS & MAP_SHARED) vrflags |= VR_SHARED;

		if(len % VM_PAGE_SIZE)
			len += VM_PAGE_SIZE - (len % VM_PAGE_SIZE);

		if(!(vr = map_page_region(vmp,
			arch_vir2map(vmp,
				m->VMM_ADDR ? m->VMM_ADDR : vmp->vm_stacktop),
			VM_DATATOP, len, MAP_NONE, vrflags, mfflags))) {
			return -ENOMEM;
		}
	} else {
		return -ENOSYS;
	}

	/* Return mapping, as seen from process. */
	vm_assert(vr);
	m->VMM_RETADDR = arch_map2vir(vmp, vr->vaddr);


	return 0;
}
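A caller-side sketch of the anonymous-mapping path that do_mmap() handles, using the message fields the handler reads (the surrounding setup and send are assumed, not shown in the source):

/* Hypothetical request for a 4-page anonymous mapping. */
kipc_msg_t m;

m.VMM_FD    = -1;			/* anonymous: no backing file */
m.VMM_FLAGS = MAP_ANONYMOUS | MAP_PREALLOC;
m.VMM_ADDR  = 0;			/* let VM pick the address */
m.VMM_LEN   = 3*VM_PAGE_SIZE + 1;	/* rounded up to 4 pages by do_mmap */
/* ...send to VM; on success, m.VMM_RETADDR holds the process-view address. */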
Example #10
/*===========================================================================*
 *				pt_checkrange		     		     *
 *===========================================================================*/
PUBLIC int pt_checkrange(pt_t *pt, vir_bytes v,  size_t bytes,
                         int write)
{
    int p, pages;

    vm_assert(!(bytes % I386_PAGE_SIZE));

    pages = bytes / I386_PAGE_SIZE;

    for(p = 0; p < pages; p++) {
        int pde = I386_VM_PDE(v);
        int pte = I386_VM_PTE(v);

        vm_assert(!(v % I386_PAGE_SIZE));
        vm_assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
        vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);

        /* Page table has to be there. */
        if(!(pt->pt_dir[pde] & I386_VM_PRESENT))
            return EFAULT;

        /* Make sure page directory entry for this page table
         * is marked present and page table entry is available.
         */
        vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);

        if(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
            return EFAULT;
        }

        if(write && !(pt->pt_pt[pde][pte] & I386_VM_WRITE)) {
            return EFAULT;
        }

        v += I386_PAGE_SIZE;
    }

    return OK;
}
Example #11
/*===========================================================================*
 *				vm_checkspares		     		     *
 *===========================================================================*/
PRIVATE void *vm_checkspares(void)
{
    int s, n = 0;
    static int total = 0, worst = 0;
    vm_assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
    for(s = 0; s < SPAREPAGES && missing_spares > 0; s++)
        if(!sparepages[s].page) {
            n++;
            if((sparepages[s].page = vm_allocpage(&sparepages[s].phys,
                                                  VMP_SPARE))) {
                missing_spares--;
                vm_assert(missing_spares >= 0);
                vm_assert(missing_spares <= SPAREPAGES);
            } else {
                printf("VM: warning: couldn't get new spare page\n");
            }
        }
    if(worst < n) worst = n;
    total += n;

    return NULL;
}
Example #12
/*===========================================================================*
 *				pt_mapkernel		     		     *
 *===========================================================================*/
PUBLIC int pt_mapkernel(pt_t *pt)
{
    int i;

    /* Any i386 page table needs to map in the kernel address space. */
    vm_assert(vmproc[VMP_SYSTEM].vm_flags & VMF_INUSE);

    if(bigpage_ok) {
        int pde;
        for(pde = 0; pde <= id_map_high_pde; pde++) {
            phys_bytes addr;
            addr = pde * I386_BIG_PAGE_SIZE;
            vm_assert((addr & I386_VM_ADDR_MASK) == addr);
            pt->pt_dir[pde] = addr | I386_VM_PRESENT |
                              I386_VM_BIGPAGE | I386_VM_USER |
                              I386_VM_WRITE | global_bit;
        }
    } else {
        vm_panic("VM: pt_mapkernel: no bigpage", NO_NUM);
    }

    if(pagedir_pde >= 0) {
        /* Kernel also wants to know about all page directories. */
        pt->pt_dir[pagedir_pde] = pagedir_pde_val;
    }

    for(i = 0; i < kernmappings; i++) {
        if(pt_writemap(pt,
                       kern_mappings[i].lin_addr,
                       kern_mappings[i].phys_addr,
                       kern_mappings[i].len,
                       kern_mappings[i].flags, 0) != OK) {
            vm_panic("pt_mapkernel: pt_writemap failed", NO_NUM);
        }
    }

    return OK;
}
Example #13
/*===========================================================================*
 *				findhole		     		     *
 *===========================================================================*/
PRIVATE u32_t findhole(pt_t *pt, u32_t vmin, u32_t vmax)
{
    /* Find a space in the virtual address space of page table 'pt',
     * between page-aligned BYTE offsets vmin and vmax, in which to
     * fit a page. Return the byte offset of the hole.
     */
    u32_t curv;
    int pde = 0, try_restart;
    static u32_t lastv = 0;

    /* Input sanity check. */
    vm_assert(vmin + I386_PAGE_SIZE >= vmin);
    vm_assert(vmax >= vmin + I386_PAGE_SIZE);
    vm_assert((vmin % I386_PAGE_SIZE) == 0);
    vm_assert((vmax % I386_PAGE_SIZE) == 0);

#if SANITYCHECKS
    curv = ((u32_t) random()) % ((vmax - vmin)/I386_PAGE_SIZE);
    curv *= I386_PAGE_SIZE;
    curv += vmin;
#else
    curv = lastv;
    if(curv < vmin || curv >= vmax)
        curv = vmin;
#endif
    try_restart = 1;

    /* Start looking for a free page starting at vmin. */
    while(curv < vmax) {
        int pte;

        vm_assert(curv >= vmin);
        vm_assert(curv < vmax);

        pde = I386_VM_PDE(curv);
        pte = I386_VM_PTE(curv);

        if(!(pt->pt_dir[pde] & I386_VM_PRESENT) ||
                !(pt->pt_pt[pde][pte] & I386_VM_PRESENT)) {
            lastv = curv;
            return curv;
        }

        curv+=I386_PAGE_SIZE;

        if(curv >= vmax && try_restart) {
            curv = vmin;
            try_restart = 0;
        }
    }

    printf("VM: out of virtual address space in vm\n");

    return NO_MEM;
}
Example #14
File: vm.c Project: smorimura/filagree
static struct variable *binary_op_lst(struct context *context,
                                      enum Opcode op,
                                      const struct variable *u,
                                      const struct variable *v)
{
    vm_assert(context, u->type==VAR_LST && v->type==VAR_LST, "list op with non-lists");
    struct variable *w = NULL;

    switch (op) {
        case VM_ADD:
            w = variable_copy(context, v);
            for (int i=0; i<u->list->length; i++)
                array_add(w->list, array_get(u->list, i));
            map_update(w->map, u->map);
            break;
        default:
            return (struct variable*)vm_exit_message(context, "unknown list operation");
    }

    return w;
}
Example #15
File: vm.c Project: smorimura/filagree
static struct variable *binary_op_nil(struct context *context,
                                      enum Opcode op,
                                      const struct variable *u,
                                      const struct variable *v)
{
    vm_assert(context, u->type==VAR_NIL || v->type==VAR_NIL, "nil op with non-nils");
    if (v->type == VAR_NIL && u->type != VAR_NIL)
        return binary_op_nil(context, op, v, u); // 1st var should be nil

    switch (op) {
        case VM_EQU:    return variable_new_bool(context, v->type == u->type);
        case VM_NEQ:    return variable_new_bool(context, v->type != u->type);
        case VM_ADD:
        case VM_SUB:    return variable_copy(context, v);
        case VM_LTN:
        case VM_GTN:
        case VM_LEQ:
        case VM_GRQ: return variable_new_bool(context, false);
        default:
            return vm_exit_message(context, "unknown binary nil op");
    }
}
Example #16
File: vm.c Project: smorimura/filagree
static struct variable *list_get_int(struct context *context,
                                     const struct variable *indexable,
                                     uint32_t index)
{
    null_check(indexable);

    enum VarType it = (enum VarType)indexable->type;
    switch (it) {
        case VAR_INT: return variable_new_int(context, index);
        case VAR_LST:
            if (index < indexable->list->length)
                return (struct variable*)array_get(indexable->list, index);
            return variable_new_nil(context);
        case VAR_STR: {
            vm_assert(context, index < indexable->str->length, "index out of bounds");
            char *str = (char*)malloc(2);
            sprintf(str, "%c", indexable->str->data[index]);
            return variable_new_str(context, byte_array_from_string(str));
        }
        default:
            vm_exit_message(context, "indexing non-indexable");
            return NULL;
    }
}
Example #17
/*===========================================================================*
 *                              pt_init                                      *
 *===========================================================================*/
PUBLIC void pt_init(phys_bytes usedlimit)
{
    /* By default, the kernel gives us a data segment with pre-allocated
     * memory that then can't grow. We want to be able to allocate memory
     * dynamically, however. So here we copy the part of the page table
     * that's ours, so we get a private page table. Then we increase the
     * hardware segment size so we can allocate memory above our stack.
     */
    pt_t *newpt;
    int s, r;
    vir_bytes v, kpagedir;
    phys_bytes lo, hi;
    vir_bytes extra_clicks;
    u32_t moveup = 0;
    int global_bit_ok = 0;
    int free_pde;
    vir_bytes kernlimit;
    vir_bytes sparepages_mem;
    phys_bytes sparepages_ph;

    /* Shorthand. */
    newpt = &vmp->vm_pt;


    /* Get ourselves spare pages. */
    if(!(sparepages_mem = (vir_bytes) aalloc(I386_PAGE_SIZE*SPAREPAGES)))
        vm_panic("pt_init: aalloc for spare failed", NO_NUM);
    if((r=sys_umap(SELF, VM_D, (vir_bytes) sparepages_mem,
                   I386_PAGE_SIZE*SPAREPAGES, &sparepages_ph)) != OK)
        vm_panic("pt_init: sys_umap failed", r);

    for(s = 0; s < SPAREPAGES; s++) {
        sparepages[s].page = (void *) (sparepages_mem + s*I386_PAGE_SIZE);
        sparepages[s].phys = sparepages_ph + s*I386_PAGE_SIZE;
    }

    missing_spares = 0;

    /* global bit and 4MB pages available? */
    global_bit_ok = _cpufeature(_CPUF_I386_PGE);
    bigpage_ok = _cpufeature(_CPUF_I386_PSE);

    /* Set bit for PTE's and PDE's if available. */
    if(global_bit_ok)
        global_bit = I386_VM_GLOBAL;

    /* The kernel and boot time processes need an identity mapping.
     * We use full PDE's for this without separate page tables.
     * Figure out which pde we can start using for other purposes.
     */
    id_map_high_pde = usedlimit / I386_BIG_PAGE_SIZE;

    /* We have to make mappings up to and including this PDE. */
    free_pde = id_map_high_pde+1;

    /* Initial (current) range of our virtual address space. */
    lo = CLICK2ABS(vmp->vm_arch.vm_seg[T].mem_phys);
    hi = CLICK2ABS(vmp->vm_arch.vm_seg[S].mem_phys +
                   vmp->vm_arch.vm_seg[S].mem_len);

    vm_assert(!(lo % I386_PAGE_SIZE));
    vm_assert(!(hi % I386_PAGE_SIZE));

    if(lo < VM_PROCSTART) {
        moveup = VM_PROCSTART - lo;
        vm_assert(!(VM_PROCSTART % I386_PAGE_SIZE));
        vm_assert(!(lo % I386_PAGE_SIZE));
        vm_assert(!(moveup % I386_PAGE_SIZE));
    }

    /* Make new page table for ourselves, partly copied
     * from the current one.
     */
    if(pt_new(newpt) != OK)
        vm_panic("pt_init: pt_new failed", NO_NUM);

    /* Set up mappings for VM process. */
    for(v = lo; v < hi; v += I386_PAGE_SIZE)  {

        /* We have to write the new position in the PT,
         * so we can move our segments.
         */
        if(pt_writemap(newpt, v+moveup, v, I386_PAGE_SIZE,
                       I386_VM_PRESENT|I386_VM_WRITE|I386_VM_USER, 0) != OK)
            vm_panic("pt_init: pt_writemap failed", NO_NUM);
    }

    /* Move segments up too. */
    vmp->vm_arch.vm_seg[T].mem_phys += ABS2CLICK(moveup);
    vmp->vm_arch.vm_seg[D].mem_phys += ABS2CLICK(moveup);
    vmp->vm_arch.vm_seg[S].mem_phys += ABS2CLICK(moveup);

    /* Allocate us a page table in which to remember page directory
     * pointers.
     */
    if(!(page_directories = vm_allocpage(&page_directories_phys,
                                         VMP_PAGETABLE)))
        vm_panic("no virt addr for vm mappings", NO_NUM);

    memset(page_directories, 0, I386_PAGE_SIZE);

    /* Increase our hardware data segment to create virtual address
     * space above our stack. We want to increase it to VM_DATATOP,
     * like regular processes have.
     */
    extra_clicks = ABS2CLICK(VM_DATATOP - hi);
    vmp->vm_arch.vm_seg[S].mem_len += extra_clicks;

    /* We pretend to the kernel we have a huge stack segment to
     * increase our data segment.
     */
    vmp->vm_arch.vm_data_top =
        (vmp->vm_arch.vm_seg[S].mem_vir +
         vmp->vm_arch.vm_seg[S].mem_len) << CLICK_SHIFT;

    /* Where our free virtual address space starts.
     * This is only a hint to the VM system.
     */
    newpt->pt_virtop = 0;

    /* Let other functions know VM now has a private page table. */
    vmp->vm_flags |= VMF_HASPT;

    /* Now reserve another pde for kernel's own mappings. */
    {
        int kernmap_pde;
        phys_bytes addr, len;
        int flags, index = 0;
        u32_t offset = 0;

        kernmap_pde = free_pde++;
        offset = kernmap_pde * I386_BIG_PAGE_SIZE;

        while(sys_vmctl_get_mapping(index, &addr, &len,
                                    &flags) == OK)  {
            vir_bytes vir;
            if(index >= MAX_KERNMAPPINGS)
                vm_panic("VM: too many kernel mappings", index);
            kern_mappings[index].phys_addr = addr;
            kern_mappings[index].len = len;
            kern_mappings[index].flags = flags;
            kern_mappings[index].lin_addr = offset;
            kern_mappings[index].flags =
                I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE |
                global_bit;
            if(flags & VMMF_UNCACHED)
                kern_mappings[index].flags |=
                    I386_VM_PWT | I386_VM_PCD;
            if(addr % I386_PAGE_SIZE)
                vm_panic("VM: addr unaligned", addr);
            if(len % I386_PAGE_SIZE)
                vm_panic("VM: len unaligned", len);
            vir = arch_map2vir(&vmproc[VMP_SYSTEM], offset);
            if(sys_vmctl_reply_mapping(index, vir) != OK)
                vm_panic("VM: reply failed", NO_NUM);
            offset += len;
            index++;
            kernmappings++;
        }
    }

    /* Find a PDE below the process area that is available for mapping
     * in the page directories (read-only).
     */
    pagedir_pde = free_pde++;
    pagedir_pde_val = (page_directories_phys & I386_VM_ADDR_MASK) |
                      I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE;

    /* Tell kernel about free pde's. */
    while(free_pde*I386_BIG_PAGE_SIZE < VM_PROCSTART) {
        if((r=sys_vmctl(SELF, VMCTL_I386_FREEPDE, free_pde++)) != OK) {
            vm_panic("VMCTL_I386_FREEPDE failed", r);
        }
    }

    /* first pde in use by process. */
    proc_pde = free_pde;

    kernlimit = free_pde*I386_BIG_PAGE_SIZE;

    /* Increase kernel segment to address this memory. */
    if((r=sys_vmctl(SELF, VMCTL_I386_KERNELLIMIT, kernlimit)) != OK) {
        vm_panic("VMCTL_I386_KERNELLIMIT failed", r);
    }

    kpagedir = arch_map2vir(&vmproc[VMP_SYSTEM],
                            pagedir_pde*I386_BIG_PAGE_SIZE);

    /* Tell kernel how to get at the page directories. */
    if((r=sys_vmctl(SELF, VMCTL_I386_PAGEDIRS, kpagedir)) != OK) {
        vm_panic("VMCTL_I386_KERNELLIMIT failed", r);
    }

    /* Give our process the new, copied, private page table. */
    pt_mapkernel(newpt);	/* didn't know about vm_dir pages earlier */
    pt_bind(newpt, vmp);

    /* Now actually enable paging. */
    if(sys_vmctl_enable_paging(vmp->vm_arch.vm_seg) != OK)
        vm_panic("pt_init: enable paging failed", NO_NUM);

    /* Back to reality - this is where the stack actually is. */
    vmp->vm_arch.vm_seg[S].mem_len -= extra_clicks;

    /* All OK. */
    return;
}
Example #18
/*===========================================================================*
 *				vm_allocpage		     		     *
 *===========================================================================*/
PUBLIC void *vm_allocpage(phys_bytes *phys, int reason)
{
    /* Allocate a page for use by VM itself. */
    phys_bytes newpage;
    vir_bytes loc;
    pt_t *pt;
    int r;
    static int level = 0;
    void *ret;

    pt = &vmp->vm_pt;
    vm_assert(reason >= 0 && reason < VMP_CATEGORIES);

    level++;

    vm_assert(level >= 1);
    vm_assert(level <= 2);

    if(level > 1 || !(vmp->vm_flags & VMF_HASPT) || !meminit_done) {
        void *s;
        s=vm_getsparepage(phys);
        level--;
        if(!s) {
            util_stacktrace();
            printf("VM: warning: out of spare pages\n");
        }
        return s;
    }

    /* VM does have a pagetable, so get a page and map it in there.
     * Where in our virtual address space can we put it?
     */
    loc = findhole(pt,  arch_vir2map(vmp, vmp->vm_stacktop),
                   vmp->vm_arch.vm_data_top);
    if(loc == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: findhole failed\n");
        return NULL;
    }

    /* Allocate a page of memory for use by VM. As VM
     * is trusted, we don't have to pre-clear it.
     */
    if((newpage = ALLOC_MEM(CLICKSPERPAGE, 0)) == NO_MEM) {
        level--;
        printf("VM: vm_allocpage: ALLOC_MEM failed\n");
        return NULL;
    }

    *phys = CLICK2ABS(newpage);

    /* Map this page into our address space. */
    if((r=pt_writemap(pt, loc, *phys, I386_PAGE_SIZE,
                      I386_VM_PRESENT | I386_VM_USER | I386_VM_WRITE, 0)) != OK) {
        FREE_MEM(newpage, CLICKSPERPAGE);
        printf("vm_allocpage writemap failed\n");
        level--;
        return NULL;
    }

    if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
        vm_panic("VMCTL_FLUSHTLB failed", r);
    }

    level--;

    /* Return user-space-ready pointer to it. */
    ret = (void *) arch_map2vir(vmp, loc);

    return ret;
}
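Putting Examples #2, #3, and #18 together: VM-internal allocations take a page, initialize it while writable, and may then write-protect it. A usage sketch built only from the calls shown above:

/* Usage sketch: allocate a page for VM itself, then lock it read-only. */
phys_bytes phys;
void *page;

if(!(page = vm_allocpage(&phys, VMP_PAGETABLE)))
	vm_panic("out of memory for a page table page", NO_NUM);

memset(page, 0, I386_PAGE_SIZE);	/* initialize while still writable */
vm_pagelock(page, 1);			/* write-protect it for VM */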
Example #19
File: vm.c Project: smorimura/filagree
void vm_null_check(struct context *context, const void* p) {
    vm_assert(context, p, "null pointer");
}
Example #20
/*===========================================================================*
 *				pt_writemap		     		     *
 *===========================================================================*/
PUBLIC int pt_writemap(pt_t *pt, vir_bytes v, phys_bytes physaddr,
                       size_t bytes, u32_t flags, u32_t writemapflags)
{
    /* Write mapping into page table. Allocate a new page table if necessary. */
    /* Page directory and table entries for this virtual address. */
    int p, pages, pdecheck;
    int finalpde;
    int verify = 0;

    if(writemapflags & WMF_VERIFY)
        verify = 1;

    vm_assert(!(bytes % I386_PAGE_SIZE));
    vm_assert(!(flags & ~(PTF_ALLFLAGS)));

    pages = bytes / I386_PAGE_SIZE;

    /* MAP_NONE means to clear the mapping. It doesn't matter
     * what's actually written into the PTE if I386_VM_PRESENT
     * isn't on, so we can just write MAP_NONE into it.
     */
#if SANITYCHECKS
    if(physaddr != MAP_NONE && !(flags & I386_VM_PRESENT)) {
        vm_panic("pt_writemap: writing dir with !P\n", NO_NUM);
    }
    if(physaddr == MAP_NONE && flags) {
        vm_panic("pt_writemap: writing 0 with flags\n", NO_NUM);
    }
#endif

    finalpde = I386_VM_PDE(v + I386_PAGE_SIZE * pages);

    /* First make sure all the necessary page tables are allocated,
     * before we start writing in any of them, because it's a pain
     * to undo our work properly. Walk the range in page-directory-entry
     * sized leaps.
     */
    for(pdecheck = I386_VM_PDE(v); pdecheck <= finalpde; pdecheck++) {
        vm_assert(pdecheck >= 0 && pdecheck < I386_VM_DIR_ENTRIES);
        if(pt->pt_dir[pdecheck] & I386_VM_BIGPAGE) {
            printf("pt_writemap: trying to write 0x%lx into 0x%lx\n",
                   physaddr, v);
            vm_panic("pt_writemap: BIGPAGE found", NO_NUM);
        }
        if(!(pt->pt_dir[pdecheck] & I386_VM_PRESENT)) {
            int r;
            if(verify) {
                printf("pt_writemap verify: no pde %d\n", pdecheck);
                return EFAULT;
            }
            vm_assert(!pt->pt_dir[pdecheck]);
            if((r=pt_ptalloc(pt, pdecheck, flags)) != OK) {
                /* Couldn't do (complete) mapping.
                 * Don't bother freeing any previously
                 * allocated page tables, they're
                 * still writable, don't point to nonsense,
                 * and pt_ptalloc leaves the directory
                 * and other data in a consistent state.
                 */
                printf("pt_writemap: pt_ptalloc failed\n", pdecheck);
                return r;
            }
        }
        vm_assert(pt->pt_dir[pdecheck] & I386_VM_PRESENT);
    }

    /* Now write in them. */
    for(p = 0; p < pages; p++) {
        u32_t entry;
        int pde = I386_VM_PDE(v);
        int pte = I386_VM_PTE(v);

        vm_assert(!(v % I386_PAGE_SIZE));
        vm_assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
        vm_assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);

        /* Page table has to be there. */
        vm_assert(pt->pt_dir[pde] & I386_VM_PRESENT);

        /* Make sure page directory entry for this page table
         * is marked present and page table entry is available.
         */
        vm_assert((pt->pt_dir[pde] & I386_VM_PRESENT) && pt->pt_pt[pde]);

#if SANITYCHECKS
        /* We don't expect to overwrite a page. */
        if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
            vm_assert(!(pt->pt_pt[pde][pte] & I386_VM_PRESENT));
#endif
        if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
            physaddr = pt->pt_pt[pde][pte] & I386_VM_ADDR_MASK;
        }

        if(writemapflags & WMF_FREE) {
            FREE_MEM(ABS2CLICK(physaddr), 1);
        }

        /* Entry we will write. */
        entry = (physaddr & I386_VM_ADDR_MASK) | flags;

        if(verify) {
            u32_t maskedentry;
            maskedentry = pt->pt_pt[pde][pte];
            maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
            /* Verify pagetable entry. */
            if(maskedentry != entry) {
                printf("pt_writemap: 0x%lx found, masked 0x%lx, 0x%lx expected\n",
                       pt->pt_pt[pde][pte], maskedentry, entry);
                return EFAULT;
            }
        } else {
            /* Write pagetable entry. */
            pt->pt_pt[pde][pte] = entry;
        }

        physaddr += I386_PAGE_SIZE;
        v += I386_PAGE_SIZE;
    }

    return OK;
}
Example #21
/*===========================================================================*
 *				void *slaballoc				     *
 *===========================================================================*/
void *slaballoc(int bytes)
{
	int i;
	int count = 0;
	struct slabheader *s;
	struct slabdata *firstused;

	SLABSANITYCHECK(SCL_FUNCTIONS);

	/* Retrieve entry in slabs[]. */
	GETSLAB(bytes, s);
	vm_assert(s);

	/* To make the common case more common, make space in the 'used'
	 * queue first.
	 */
	if(!LH(s, LIST_USED)) {
		/* Make sure there is something on the freelist. */
	SLABSANITYCHECK(SCL_DETAIL);
		if(!LH(s, LIST_FREE)) {
			struct slabdata *nd = newslabdata(LIST_FREE);
	SLABSANITYCHECK(SCL_DETAIL);
			if(!nd) return NULL;
			ADDHEAD(nd, s, LIST_FREE);
	SLABSANITYCHECK(SCL_DETAIL);
		}


	SLABSANITYCHECK(SCL_DETAIL);
		MOVEHEAD(s, LIST_FREE, LIST_USED);
	SLABSANITYCHECK(SCL_DETAIL);

	}
	SLABSANITYCHECK(SCL_DETAIL);

	vm_assert(s);
	firstused = LH(s, LIST_USED);
	vm_assert(firstused);
	vm_assert(firstused->sdh.magic1 == MAGIC1);
	vm_assert(firstused->sdh.magic2 == MAGIC2);
	vm_assert(firstused->sdh.nused < ITEMSPERPAGE(bytes));

	for(i = firstused->sdh.freeguess;
		count < ITEMSPERPAGE(bytes); count++, i++) {
	SLABSANITYCHECK(SCL_DETAIL);
		i = i % ITEMSPERPAGE(bytes);

		if(!GETBIT(firstused, i)) {
			struct slabdata *f;
			char *ret;
			SETBIT(firstused, i);
	SLABSANITYCHECK(SCL_DETAIL);
			if(firstused->sdh.nused == ITEMSPERPAGE(bytes)) {
	SLABSANITYCHECK(SCL_DETAIL);
				MOVEHEAD(s, LIST_USED, LIST_FULL);
	SLABSANITYCHECK(SCL_DETAIL);
			}
	SLABSANITYCHECK(SCL_DETAIL);
			ret = ((char *) firstused->data) + i*bytes;

#if SANITYCHECKS
			nojunkwarning++;
			slabunlock(ret, bytes);
			nojunkwarning--;
			vm_assert(!nojunkwarning);
			*(u32_t *) ret = NOJUNK;
			slablock(ret, bytes);
#endif
			SLABSANITYCHECK(SCL_FUNCTIONS);
			SLABDATAUSE(firstused, firstused->sdh.freeguess = i+1;);

#if SANITYCHECKS
	if(bytes >= SLABSIZES+MINSIZE) {
		printk("slaballoc: odd, bytes %d?\n", bytes);
	}
			if(!slabsane_f(__FILE__, __LINE__, ret, bytes))
				vm_panic("slaballoc: slabsane failed", NO_NUM);
#endif

			return ret;
		}

	SLABSANITYCHECK(SCL_DETAIL);

	}

	/* The assertion above guarantees a free slot in this page, so
	 * the loop should always return; reaching this point means the
	 * use bitmap is inconsistent.
	 */
	vm_panic("slaballoc: no space found in used slab page", NO_NUM);

	return NULL;
}
Example #22
File: vm.c Project: nohajc/.KEK-on-Rails
/* this function is not thread safe */
char *kek_obj_print(kek_obj_t *kek_obj) {
	static char str[1024];

	if (kek_obj == (kek_obj_t *) 0xffffffffffffffff) {
		(void) snprintf(str, 1024, "kek_obj == 0xffffffffffffffff");
		assert(0);
		goto out;
	}

	if (kek_obj == NULL) {
		(void) snprintf(str, 1024, "kek_obj == NULL");
		goto out;
	}

	/* vm_debug(DBG_STACK | DBG_STACK_FULL, "kek_obj = %p\n", kek_obj); */
	if (!IS_PTR(kek_obj)) {
		if (IS_CHAR(kek_obj)) {
			(void) snprintf(str, 1024, "char -%c-", CHAR_VAL(kek_obj));
		} else if (IS_INT(kek_obj)) {
			(void) snprintf(str, 1024, "int -%d-", INT_VAL(kek_obj));
		}
	} else {

		vm_assert(TYPE_CHECK(kek_obj->h.t), //
				"kek_obj=%p, "//
				"type=%d, "//
				"state=%d, "//
				"is_const=%d, "//
				"fromspace=%d, "//
				"tospace=%d\n",//
				kek_obj,//
				kek_obj->h.t,//
				kek_obj->h.state,//
				vm_is_const(kek_obj),//
				gc_cheney_ptr_in_from_space(kek_obj, 1),//
				gc_cheney_ptr_in_to_space(kek_obj, 1));

		switch (kek_obj->h.t) {
		case KEK_INT:
			(void) snprintf(str, 1024, "int -%d-", INT_VAL(kek_obj));
			break;
		case KEK_STR:
			(void) snprintf(str, 1024, "str -%s-",
					((kek_string_t *) kek_obj)->string);
			break;
		case KEK_ARR:
			(void) snprintf(str, 1024, "arr -%p-", (void*) kek_obj);
			break;
		case KEK_SYM:
			(void) snprintf(str, 1024, "sym -%s-",
					((kek_symbol_t *) kek_obj)->symbol);
			break;
		case KEK_NIL:
			(void) snprintf(str, 1024, "nil");
			break;
		case KEK_UDO:
			(void) snprintf(str, 1024, "udo");
			break;
		case KEK_ARR_OBJS:
			(void) snprintf(str, 1024, "arr_objs");
			break;
		case KEK_EXINFO:
			(void) snprintf(str, 1024, "exinfo");
			break;
		case KEK_EXPT:
			(void) snprintf(str, 1024, "expt");
			break;
		case KEK_FILE:
			(void) snprintf(str, 1024, "file");
			break;
		case KEK_TERM:
			(void) snprintf(str, 1024, "term");
			break;
		case KEK_CLASS:
			(void) snprintf(str, 1024, "class");
			break;
		case KEK_STACK:
			(void) snprintf(str, 1024, "stack");
			break;
		case KEK_COPIED:
			(void) snprintf(str, 1024, "COPIED!");
			break;
		default:
			assert(0);
			break;
		}
	}

	out: /* */
	return ((char *) (&str));
}