/*
 * Bootstrap the VM subsystem (slab-allocator variant).
 * Called once at startup; the ordering of these calls is significant —
 * e.g. kmem_init() consumes the [start, end) kernel VA range produced
 * by vm_page_bootstrap().
 */
void vm_mem_bootstrap(void)
{
	vm_offset_t start, end;

	/*
	 * Initializes resident memory structures.
	 * From here on, all physical memory is accounted for,
	 * and we use only virtual addresses.
	 */
	vm_page_bootstrap(&start, &end);

	/*
	 * Initialize other VM packages
	 */
	slab_bootstrap();		/* early slab state, before any caches exist */
	vm_object_bootstrap();
	vm_map_init();
	kmem_init(start, end);		/* kernel submap over the VA range from above */
	pmap_init();			/* pmap may now allocate for its own use */
	slab_init();			/* NOTE(review): presumably completes slab setup now that kmem is up — confirm */
	kalloc_init();			/* general-purpose kernel allocator */
	vm_fault_init();
	vm_page_module_init();
	memory_manager_default_init();
}
/*
 * Map `pg` at virtual address `va` in `p`, then write a sequential
 * word pattern through the mapping and assert it reads back intact.
 * (1024 words per page — matches the original test's assumption.)
 */
static void map_fill_verify(pmap_t *p, vm_page_t *pg, vaddr_t va)
{
	pmap_map(p, va, pg->paddr, pg->size, PMAP_VALID | PMAP_DIRTY);

	int *words = (int *)va;
	for (int k = 0; k < 1024 * pg->size; k++)
		words[k] = k;
	for (int k = 0; k < 1024 * pg->size; k++)
		assert(words[k] == k);
}

/* Simple pmap smoke test: map two allocations and verify read/write. */
int main()
{
	pmap_t pmap;

	pmap_init(&pmap);
	pmap.asid = 10;
	set_active_pmap(&pmap);

	/* A 4-page allocation mapped low in the address space. */
	vm_page_t *first = pm_alloc(4);
	map_fill_verify(&pmap, first, PAGESIZE * 10);

	/* A single page mapped at a distant address. */
	vm_page_t *second = pm_alloc(1);
	map_fill_verify(&pmap, second, PAGESIZE * 2000);

	pm_free(first);
	pm_free(second);
	pmap_delete(&pmap);

	kprintf("Tests passed\n");
	return 0;
}
/*
 * Bootstrap the VM subsystem (zone-allocator variant).
 * Called once at startup; call order is significant — kmem_init()
 * consumes the [start, end) range produced by vm_page_bootstrap(),
 * and zone_init() is sized from the post-bootstrap free page count.
 */
void vm_mem_bootstrap(void)
{
	vm_offset_t start, end;

	/*
	 * Initializes resident memory structures.
	 * From here on, all physical memory is accounted for,
	 * and we use only virtual addresses.
	 */
	vm_page_bootstrap(&start, &end);

	/*
	 * Initialize other VM packages
	 */
	zone_bootstrap();
	vm_object_bootstrap();
	vm_map_init();
	kmem_init(start, end);
	pmap_init();
	/* Size the zone system from the free memory remaining after bootstrap. */
	zone_init((vm_size_t)ptoa(vm_page_free_count));
	kalloc_init();
#if MACH_RT
	rtalloc_init();		/* real-time allocator, only in MACH_RT kernels */
#endif /* MACH_RT */
	vm_fault_init();
	vm_page_module_init();
	memory_manager_default_init();
}
/*
 * Bootstrap the pmap layer: create the pool that backs pmap objects,
 * then initialize the kernel pmap over [KERNEL_BASE, KERNEL_END).
 * Either step failing is fatal at this stage of boot.
 */
void pmap_bootstrap(void)
{
	int rc;

	if ((rc = pool_create(&pmap_pool, "PMAP", sizeof(struct pmap),
	    POOL_DEFAULT)) != 0)
		panic("%s: pool_create failed: %m", __func__, rc);

	if ((rc = pmap_init(&kernel_vm, KERNEL_BASE, KERNEL_END)) != 0)
		panic("%s: pmap_init failed: %m", __func__, rc);
}
/* ARGSUSED*/
/*
 * One-time VM startup hook. The `dummy` argument is unused (ARGSUSED);
 * call order below is significant.
 */
static void vm_mem_init(void *dummy)
{
	/*
	 * Initializes resident memory structures. From here on, all physical
	 * memory is accounted for, and we use only virtual addresses.
	 */
	vm_set_page_size();	/* must precede vm_page_startup() */
	vm_page_startup();

	/*
	 * Initialize other VM packages
	 */
	vm_object_init1();
	vm_map_startup();
	kmem_init();
	pmap_init();
}
/*
 * uvm_init: bring up the UVM virtual memory system, step by step.
 * Called once, early in boot; each step depends on the ones before it.
 */
void uvm_init()
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */
	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: zero the uvm structure
	 */
	memset(&uvm, 0, sizeof(uvm));
#ifndef OSKIT
	averunnable.fscale = FSCALE;	/* load-average fixed-point scale */
#endif

	/*
	 * step 2: init the page sub-system.  this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks).  available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */
	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system.  allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */
	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures.  this
	 * includes setting up the kernel_map/kernel_object and the kmem_map/
	 * kmem_object.
	 */
	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module.  the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */
	pmap_init();

	/*
	 * step 6: init the kernel memory allocator.  after this call the
	 * kernel memory allocator (malloc) can be used.
	 */
	kmeminit();

	/*
	 * step 7: init all pagers and the pager_map.
	 */
	uvm_pager_init();

	/*
	 * step 8: init anonymous memory systems (both amap and anons)
	 */
	amap_init();		/* init amap module */
	uvm_anon_init();	/* allocate initial anons */

	/*
	 * the VM system is now up!  now that malloc is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */
	uvm_page_rehash();
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	/*
	 * done!
	 */
	return;
}
/*
 * uvm_init: bring up the UVM virtual memory system, step by step.
 * Called once, early in boot; each step depends on the ones before it.
 */
void uvm_init()
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */
	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: zero the uvm structure
	 */
	memset(&uvm, 0, sizeof(uvm));
	averunnable.fscale = FSCALE;	/* load-average fixed-point scale */

	/*
	 * step 2: init the page sub-system.  this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks).  available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */
	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system.  allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */
	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures.  this
	 * includes setting up the kernel_map/kernel_object and the kmem_map/
	 * kmem_object.
	 */
	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module.  the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */
	pmap_init();

	/*
	 * step 6: init the kernel memory allocator.  after this call the
	 * kernel memory allocator (malloc) can be used.
	 */
	uvm_km_page_init();
	kmeminit();
#if !defined(__HAVE_PMAP_DIRECT)
	/* Without direct-mapped pages, defer a helper thread for km pages. */
	kthread_create_deferred(uvm_km_createthread, NULL);
#endif

	/*
	 * step 7: init all pagers and the pager_map.
	 */
	uvm_pager_init();

	/*
	 * step 8: init anonymous memory system
	 */
	amap_init();		/* init amap module */

	/*
	 * the VM system is now up!  now that malloc is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */
	uvm_page_rehash();
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	/*
	 * reserve some unmapped space for malloc/pool use after free usage
	 */
#ifdef DEADBEEF0
	/* Keep a guard window around the DEADBEEF0 poison address unmapped. */
	kvm_start = trunc_page(DEADBEEF0) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x\n", DEADBEEF0);
#endif
#ifdef DEADBEEF1
	kvm_start = trunc_page(DEADBEEF1) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x\n", DEADBEEF1);
#endif
	/*
	 * init anonymous memory systems
	 */
	uvm_anon_init();
}
/*
 * uvm_init: bring up the UVM virtual memory system, step by step.
 * Called once, early in boot; each step depends on the ones before it.
 */
void uvm_init(void)
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */
	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: zero the uvm structure
	 */
	memset(&uvm, 0, sizeof(uvm));
	averunnable.fscale = FSCALE;	/* load-average fixed-point scale */
	uvm_amap_init();

	/*
	 * step 2: init the page sub-system.  this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks).  available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */
	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system.  allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */
	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures.  this
	 * includes setting up the kernel_map/kernel_object.
	 */
	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module.  the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */
	pmap_init();

	/*
	 * step 6: init the kernel memory allocator.  after this call the
	 * kernel memory allocator (malloc) can be used.  this includes
	 * setting up the kmem_map.
	 */
	kmeminit();
#ifdef DEBUG
	debug_init();
#endif

	/*
	 * step 7: init all pagers and the pager_map.
	 */
	uvm_pager_init();

	/*
	 * step 8: init the uvm_loan() facility.
	 */
	uvm_loan_init();

	/*
	 * Initialize pools.  This must be done before anyone manipulates
	 * any vm_maps because we use a pool for some map entry structures.
	 */
	pool_subsystem_init();

	/*
	 * init slab memory allocator kmem(9).
	 */
	kmem_init();

	/*
	 * the VM system is now up!  now that kmem is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);
	uvmpdpol_reinit();	/* page-daemon policy re-init once kmem is up */

	/*
	 * init anonymous memory systems
	 */
	uvm_anon_init();
	uvm_uarea_init();	/* u-area (kernel stack) allocator */

	/*
	 * init readahead module
	 */
	uvm_ra_init();
}
/*
 * uvm_init: bring up the UVM virtual memory system, step by step.
 * Called once, early in boot; each step depends on the ones before it.
 */
void uvm_init(void)
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */
	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: set up stats.
	 */
	averunnable.fscale = FSCALE;	/* load-average fixed-point scale */

	/*
	 * step 2: init the page sub-system.  this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks).  available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */
	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system.  allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */
	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures.  this
	 * includes setting up the kernel_map/kernel_object and the kmem_map/
	 * kmem_object.
	 */
	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module.  the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */
	pmap_init();

	/*
	 * step 6: init the kernel memory allocator.  after this call the
	 * kernel memory allocator (malloc) can be used.
	 */
	kmeminit();

	/*
	 * step 6.5: init the dma allocator, which is backed by pools.
	 */
	dma_alloc_init();

	/*
	 * step 7: init all pagers and the pager_map.
	 */
	uvm_pager_init();

	/*
	 * step 8: init anonymous memory system
	 */
	amap_init();		/* init amap module */

	/*
	 * step 9: init uvm_km_page allocator memory.
	 */
	uvm_km_page_init();

	/*
	 * the VM system is now up!  now that malloc is up we can
	 * enable paging of kernel objects.
	 */
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	/*
	 * reserve some unmapped space for malloc/pool use after free usage
	 */
#ifdef DEADBEEF0
	/* Keep a guard window around the DEADBEEF0 poison address unmapped. */
	kvm_start = trunc_page(DEADBEEF0) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF0);
#endif
#ifdef DEADBEEF1
	kvm_start = trunc_page(DEADBEEF1) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF1);
#endif
	/*
	 * init anonymous memory systems
	 */
	uvm_anon_init();

#ifndef SMALL_KERNEL
	/*
	 * Switch kernel and kmem_map over to a best-fit allocator,
	 * instead of walking the tree.
	 */
	uvm_map_set_uaddr(kernel_map, &kernel_map->uaddr_any[3],
	    uaddr_bestfit_create(vm_map_min(kernel_map),
	    vm_map_max(kernel_map)));
	uvm_map_set_uaddr(kmem_map, &kmem_map->uaddr_any[3],
	    uaddr_bestfit_create(vm_map_min(kmem_map),
	    vm_map_max(kmem_map)));
#endif /* !SMALL_KERNEL */
}
// Called first from entry.S on the bootstrap processor, // and later from boot/bootother.S on all other processors. // As a rule, "init" functions in PIOS are called once on EACH processor. void init(void) { extern char start[], edata[], end[]; // Before anything else, complete the ELF loading process. // Clear all uninitialized global data (BSS) in our program, // ensuring that all static/global variables start out zero. if (cpu_onboot()) memset(edata, 0, end - edata); // Initialize the console. // Can't call cprintf until after we do this! cons_init(); extern uint8_t _binary_obj_boot_bootother_start[], _binary_obj_boot_bootother_size[]; uint8_t *code = (uint8_t*)lowmem_bootother_vec; memmove(code, _binary_obj_boot_bootother_start, (uint32_t) _binary_obj_boot_bootother_size); // Lab 1: test cprintf and debug_trace cprintf("1234 decimal is %o octal!\n", 1234); debug_check(); // Initialize and load the bootstrap CPU's GDT, TSS, and IDT. cpu_init(); trap_init(); // Physical memory detection/initialization. // Can't call mem_alloc until after we do this! mem_init(); // Lab 2: check spinlock implementation if (cpu_onboot()) spinlock_check(); // Initialize the paged virtual memory system. pmap_init(); // Find and start other processors in a multiprocessor system mp_init(); // Find info about processors in system pic_init(); // setup the legacy PIC (mainly to disable it) ioapic_init(); // prepare to handle external device interrupts lapic_init(); // setup this CPU's local APIC cpu_bootothers(); // Get other processors started // cprintf("CPU %d (%s) has booted\n", cpu_cur()->id, // cpu_onboot() ? "BP" : "AP"); file_init(); // Create root directory and console I/O files // Lab 4: uncomment this when you can handle IRQ_SERIAL and IRQ_KBD. //cons_intenable(); // Let the console start producing interrupts // Initialize the process management code. proc_init(); // Initialize the process management code. 
proc_init(); if(!cpu_onboot()) proc_sched(); proc *root = proc_root = proc_alloc(NULL,0); elfhdr *ehs = (elfhdr *)ROOTEXE_START; assert(ehs->e_magic == ELF_MAGIC); proghdr *phs = (proghdr *) ((void *) ehs + ehs->e_phoff); proghdr *ep = phs + ehs->e_phnum; for (; phs < ep; phs++) { if (phs->p_type != ELF_PROG_LOAD) continue; void *fa = (void *) ehs + ROUNDDOWN(phs->p_offset, PAGESIZE); uint32_t va = ROUNDDOWN(phs->p_va, PAGESIZE); uint32_t zva = phs->p_va + phs->p_filesz; uint32_t eva = ROUNDUP(phs->p_va + phs->p_memsz, PAGESIZE); uint32_t perm = SYS_READ | PTE_P | PTE_U; if(phs->p_flags & ELF_PROG_FLAG_WRITE) perm |= SYS_WRITE | PTE_W; for (; va < eva; va += PAGESIZE, fa += PAGESIZE) { pageinfo *pi = mem_alloc(); assert(pi != NULL); if(va < ROUNDDOWN(zva, PAGESIZE)) memmove(mem_pi2ptr(pi), fa, PAGESIZE); else if (va < zva && phs->p_filesz) { memset(mem_pi2ptr(pi),0, PAGESIZE); memmove(mem_pi2ptr(pi), fa, zva-va); } else memset(mem_pi2ptr(pi), 0, PAGESIZE); pte_t *pte = pmap_insert(root->pdir, pi, va, perm); assert(pte != NULL); } } root->sv.tf.eip = ehs->e_entry; root->sv.tf.eflags |= FL_IF; pageinfo *pi = mem_alloc(); assert(pi != NULL); pte_t *pte = pmap_insert(root->pdir, pi, VM_STACKHI-PAGESIZE, SYS_READ | SYS_WRITE | PTE_P | PTE_U | PTE_W); assert(pte != NULL); root->sv.tf.esp = VM_STACKHI; proc_ready(root); proc_sched(); // Initialize the I/O system. // Lab 1: change this so it enters user() in user mode, // running on the user_stack declared above, // instead of just calling user() directly. user(); // FIXME: Maybe get rid of this }
/*
 * uvm_init: bring up the UVM virtual memory system.
 * Called once, early in boot; each step depends on the ones before it.
 */
void uvm_init(void)
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * Ensure that the hardware set the page size, zero the UVM structure.
	 */
	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}
	memset(&uvm, 0, sizeof(uvm));
	averunnable.fscale = FSCALE;	/* load-average fixed-point scale */

	/*
	 * Init the page sub-system.  This includes allocating the vm_page
	 * structures, and setting up all the page queues (and locks).
	 * Available memory will be put in the "free" queue, kvm_start and
	 * kvm_end will be set to the area of kernel virtual memory which
	 * is available for general use.
	 */
	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * Init the map sub-system.
	 */
	uvm_map_init();

	/*
	 * Setup the kernel's virtual memory data structures.  This includes
	 * setting up the kernel_map/kernel_object.
	 * Bootstrap all kernel memory allocators.
	 */
	uao_init();
	uvm_km_bootstrap(kvm_start, kvm_end);

	/*
	 * Setup uvm_map caches and init the amap.
	 */
	uvm_map_init_caches();
	uvm_amap_init();

	/*
	 * Init the pmap module.  The pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */
	pmap_init();

	/*
	 * Make kernel memory allocators ready for use.
	 * After this call the pool/kmem memory allocators can be used.
	 */
	uvm_km_init();

#ifdef DEBUG
	debug_init();
#endif

	/*
	 * Init all pagers and the pager_map.
	 */
	uvm_pager_init();

	/*
	 * Initialize the uvm_loan() facility.
	 */
	uvm_loan_init();

	/*
	 * Init emap subsystem.
	 */
	uvm_emap_sysinit();

	/*
	 * The VM system is now up!  Now that kmem is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);
	uvmpdpol_reinit();	/* page-daemon policy re-init once kmem is up */

	/*
	 * Init anonymous memory systems.
	 */
	uvm_anon_init();
	uvm_uarea_init();	/* u-area (kernel stack) allocator */

	/*
	 * Init readahead mechanism.
	 */
	uvm_ra_init();
}