/*
 * init_vm - one-time initialization of the VM server at boot.
 *
 * Runs once before VM enters its message loop.  Ordering here is
 * critical: boot parameters must be fetched before memory chunks are
 * known, the allocator (mem_init) must run before page tables
 * (pt_init), and VM's own process slot must be set up before any
 * other boot process is given a page table.
 *
 * NOTE(review): this chunk contains a second, differing definition of
 * init_vm further down — apparently two revisions of the same file
 * concatenated; confirm which one belongs in the build.
 */
void init_vm(void)
{
	int s, i;
	static struct memory mem_chunks[NR_MEMS];
	static struct boot_image *ip;
	extern void __minix_init(void);
	multiboot_module_t *mod;
	vir_bytes kern_dyn, kern_static;

#if SANITYCHECKS
	incheck = nocheck = 0;
#endif

	/* Retrieve various crucial boot parameters from the kernel. */
	if(OK != (s=sys_getkinfo(&kernel_boot_info))) {
		panic("couldn't get bootinfo: %d", s);
	}

	/* Turn file mmap on?  (Boot-environment switch, 0 or 1.) */
	env_parse("filemap", "d", 0, &enable_filemap, 0, 1);

	/* Sanity check: the kernel must have given us a memory map and
	 * at least the kernel itself as a boot module.
	 */
	assert(kernel_boot_info.mmap_size > 0);
	assert(kernel_boot_info.mods_with_kernel > 0);

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	/* Each slot remembers its own index so it can be found from a
	 * bare vmproc pointer.
	 */
	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* Initialize ACL data structures. */
	acl_init();

	/* region management initialization. */
	map_region_init();

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);

	/* Architecture-dependent initialization: VM's own process slot
	 * first, then the page-table machinery.
	 */
	init_proc(VM_PROC_NR);
	pt_init();

	/* The kernel's freelist does not include boot-time modules; let
	 * the allocator know that the total memory is bigger.
	 * NOTE(review): the loop bound stops one short of
	 * mods_with_kernel — presumably the last module is the kernel
	 * itself, accounted for separately below; confirm against the
	 * kernel's module layout.
	 */
	for (mod = &kernel_boot_info.module_list[0];
	    mod < &kernel_boot_info.module_list[kernel_boot_info.mods_with_kernel-1]; mod++) {
		phys_bytes len = mod->mod_end-mod->mod_start+1;
		len = roundup(len, VM_PAGE_SIZE);
		mem_add_total_pages(len/VM_PAGE_SIZE);
	}

	/* Also account for the kernel's own static and dynamic
	 * allocations in the total page count.
	 */
	kern_dyn = kernel_boot_info.kernel_allocated_bytes_dynamic;
	kern_static = kernel_boot_info.kernel_allocated_bytes;
	kern_static = roundup(kern_static, VM_PAGE_SIZE);
	mem_add_total_pages((kern_dyn + kern_static)/VM_PAGE_SIZE);

	/* Give these processes their own page table. */
	for (ip = &kernel_boot_info.boot_procs[0];
		ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {
		struct vmproc *vmp;

		/* Negative proc_nr slots are kernel tasks; skip them. */
		if(ip->proc_nr < 0) continue;

		assert(ip->start_addr);

		/* VM has already been set up by the kernel and pt_init().
		 * Any other boot process is already in memory and is set up
		 * here.
		 */
		if(ip->proc_nr == VM_PROC_NR) continue;

		vmp = init_proc(ip->proc_nr);

		exec_bootproc(vmp, ip);

		/* Free the file blob: the boot image copy is no longer
		 * needed once the process has been set up in its own
		 * address space.
		 */
		assert(!(ip->start_addr % VM_PAGE_SIZE));
		ip->len = roundup(ip->len, VM_PAGE_SIZE);
		free_mem(ABS2CLICK(ip->start_addr), ABS2CLICK(ip->len));
	}

	/* Set up table of calls.  CALLMAP maps a VM call number onto
	 * its handler and records the call's name for diagnostics.
	 */
#define CALLMAP(code, func) { int i; \
	i=CALLNUMBER(code); \
	assert(i >= 0); \
	assert(i < NR_VM_CALLS); \
	vm_calls[i].vmc_func = (func); \
	vm_calls[i].vmc_name = #code; \
}

	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_munmap);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	/* Calls from VFS. */
	CALLMAP(VM_VFS_REPLY, do_vfs_reply);
	CALLMAP(VM_VFS_MMAP, do_vfs_mmap);

	/* Calls from RS */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);
	CALLMAP(VM_RS_UPDATE, do_rs_update);
	CALLMAP(VM_RS_MEMCTL, do_rs_memctl);

	/* Calls from RS/VFS */
	CALLMAP(VM_PROCCTL, do_procctl);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_REMAP_RO, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_munmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);
	CALLMAP(VM_WATCH_EXIT, do_watch_exit);

	/* Cache blocks. */
	CALLMAP(VM_MAPCACHEPAGE, do_mapcache);
	CALLMAP(VM_SETCACHEPAGE, do_setcache);

	/* getrusage */
	CALLMAP(VM_GETRUSAGE, do_getrusage);

	/* Initialize the structures for queryexit */
	init_query_exit();

	/* Acquire kernel ipc vectors that weren't available
	 * before VM had determined kernel mappings
	 */
	__minix_init();
}
/*
 * init_vm - one-time initialization of the VM server at boot.
 *
 * NOTE(review): this is a second, differing definition of init_vm in
 * the same chunk (the other revision above adds acl_init/filemap/VFS
 * and cache calls, while this one has vm_sanitychecklevel and the
 * forgetblock/yieldblock calls).  Two definitions of the same symbol
 * cannot both be compiled into one translation unit — confirm which
 * revision is current.
 *
 * Ordering is critical: boot parameters first, allocator (mem_init)
 * before page tables (pt_init), VM's own slot before other boot
 * processes are given page tables.
 */
void init_vm(void)
{
	int s, i;
	static struct memory mem_chunks[NR_MEMS];
	static struct boot_image *ip;
	extern void __minix_init(void);

#if SANITYCHECKS
	incheck = nocheck = 0;
#endif

	/* Retrieve various crucial boot parameters from the kernel. */
	if(OK != (s=sys_getkinfo(&kernel_boot_info))) {
		panic("couldn't get bootinfo: %d", s);
	}

	/* Sanity check: the kernel must have given us a memory map and
	 * at least the kernel itself as a boot module.
	 */
	assert(kernel_boot_info.mmap_size > 0);
	assert(kernel_boot_info.mods_with_kernel > 0);

#if SANITYCHECKS
	/* Boot-environment knob for how aggressive the internal
	 * consistency checks are (0..SCL_MAX).
	 */
	env_parse("vm_sanitychecklevel", "d", 0, &vm_sanitychecklevel, 0, SCL_MAX);
#endif

	/* Get chunks of available memory. */
	get_mem_chunks(mem_chunks);

	/* Set table to 0. This invalidates all slots (clear VMF_INUSE). */
	memset(vmproc, 0, sizeof(vmproc));

	/* Each slot remembers its own index so it can be found from a
	 * bare vmproc pointer.
	 */
	for(i = 0; i < ELEMENTS(vmproc); i++) {
		vmproc[i].vm_slot = i;
	}

	/* region management initialization. */
	map_region_init();

	/* Initialize tables to all physical memory. */
	mem_init(mem_chunks);

	/* Architecture-dependent initialization: VM's own process slot
	 * first, then the page-table machinery.
	 */
	init_proc(VM_PROC_NR);
	pt_init();

	/* Give these processes their own page table. */
	for (ip = &kernel_boot_info.boot_procs[0];
		ip < &kernel_boot_info.boot_procs[NR_BOOT_PROCS]; ip++) {
		struct vmproc *vmp;

		/* Negative proc_nr slots are kernel tasks; skip them. */
		if(ip->proc_nr < 0) continue;

		assert(ip->start_addr);

		/* VM has already been set up by the kernel and pt_init().
		 * Any other boot process is already in memory and is set up
		 * here.
		 */
		if(ip->proc_nr == VM_PROC_NR) continue;

		vmp = init_proc(ip->proc_nr);

		exec_bootproc(vmp, ip);

		/* Free the file blob: the boot image copy is no longer
		 * needed once the process has been set up in its own
		 * address space.
		 */
		assert(!(ip->start_addr % VM_PAGE_SIZE));
		ip->len = roundup(ip->len, VM_PAGE_SIZE);
		free_mem(ABS2CLICK(ip->start_addr), ABS2CLICK(ip->len));
	}

	/* Set up table of calls.  CALLMAP maps a VM call number onto
	 * its handler and records the call's name for diagnostics.
	 */
#define CALLMAP(code, func) { int i; \
	i=CALLNUMBER(code); \
	assert(i >= 0); \
	assert(i < NR_VM_CALLS); \
	vm_calls[i].vmc_func = (func); \
	vm_calls[i].vmc_name = #code; \
}

	/* Set call table to 0. This invalidates all calls (clear
	 * vmc_func).
	 */
	memset(vm_calls, 0, sizeof(vm_calls));

	/* Basic VM calls. */
	CALLMAP(VM_MMAP, do_mmap);
	CALLMAP(VM_MUNMAP, do_munmap);
	CALLMAP(VM_MAP_PHYS, do_map_phys);
	CALLMAP(VM_UNMAP_PHYS, do_munmap);

	/* Calls from PM. */
	CALLMAP(VM_EXIT, do_exit);
	CALLMAP(VM_FORK, do_fork);
	CALLMAP(VM_BRK, do_brk);
	CALLMAP(VM_WILLEXIT, do_willexit);
	CALLMAP(VM_NOTIFY_SIG, do_notify_sig);

	/* Calls from RS */
	CALLMAP(VM_RS_SET_PRIV, do_rs_set_priv);
	CALLMAP(VM_RS_UPDATE, do_rs_update);
	CALLMAP(VM_RS_MEMCTL, do_rs_memctl);

	/* Calls from RS/VFS */
	CALLMAP(VM_PROCCTL, do_procctl);

	/* Generic calls. */
	CALLMAP(VM_REMAP, do_remap);
	CALLMAP(VM_REMAP_RO, do_remap);
	CALLMAP(VM_GETPHYS, do_get_phys);
	CALLMAP(VM_SHM_UNMAP, do_munmap);
	CALLMAP(VM_GETREF, do_get_refcount);
	CALLMAP(VM_INFO, do_info);
	CALLMAP(VM_QUERY_EXIT, do_query_exit);
	CALLMAP(VM_WATCH_EXIT, do_watch_exit);
	CALLMAP(VM_FORGETBLOCKS, do_forgetblocks);
	CALLMAP(VM_FORGETBLOCK, do_forgetblock);
	CALLMAP(VM_YIELDBLOCKGETBLOCK, do_yieldblockgetblock);

	/* Initialize the structures for queryexit */
	init_query_exit();

	/* Acquire kernel ipc vectors that weren't available
	 * before VM had determined kernel mappings
	 */
	__minix_init();
}