/*
 * swap_out - evict one user page and hand its frame to the caller.
 *
 * Scans the whole coremap for the page with status 0 (user, unfixed) and the
 * smallest timestamp (oldest by the localtime LRU counter), writes it to the
 * swap file, shoots down its TLB entry, zeroes the frame, and returns the
 * frame's physical address.
 *
 * flag != 0: frame will back a kernel page (status 1, no addrspace).
 * flag == 0: frame will back a user page for the current thread's addrspace.
 *
 * Caller must hold coremap_lock (see page_alloc). Panics if no evictable
 * page or no matching page-table entry is found.
 */
paddr_t swap_out(int flag){
	struct coremap *local_coremap = coremap_list;
	struct coremap *swap_coremap = NULL ;//coremap_list;
	uint32_t min = 4294967295; /* UINT32_MAX sentinel for the LRU scan */
	//struct tlbshootdown *tlbsd = (struct tlbshootdown*)kmalloc(sizeof(struct tlbshootdown)) ;
	struct tlbshootdown tlbsd ;
	/* Pass 1: find the least-recently-stamped evictable (status==0) page. */
	while(local_coremap!=NULL){
		if(local_coremap->status == 0 && local_coremap->timestamp <= min /*&& local_coremap->swap == 0*/){
			min = local_coremap->timestamp;
			swap_coremap = local_coremap;
		}
		local_coremap = local_coremap->next;
	}
	if(swap_coremap == NULL){
		panic("Could not find page in other processes\n");
	}
	/* Find the victim's page-table entry (state!=1 skips entries already
	 * considered on disk — NOTE(review): confirm state encoding). */
	struct addrspace *temp = swap_coremap->as;
	struct page_table_entry *pt = temp->page_table;
	while(pt!=NULL){
		if(pt->pa == swap_coremap->pa && pt->state!=1)
			break;
		pt=pt->next;
	}
	off_t off = 0;
	if(pt == NULL)
		panic("swap_out : test panic \n");//off = write_to_swap(PADDR_TO_KVADDR(swap_coremap->pa),-1) ;
	else{
		//swap_coremap->swap = 1;
		//if(swap_coremap->as == curthread->t_addrspace){
		/* Invalidate any cached translation before the frame is reused. */
		tlbsd.ts_vaddr = swap_coremap->va ;
		vm_tlbshootdown(&tlbsd) ;
		//}
		off = write_to_swap(PADDR_TO_KVADDR(swap_coremap->pa),pt->offset) ;
		pt->state = PAGE_ON_DISK;
		pt->offset = off;
	}
	/* Scrub the frame and restamp it for its new owner. */
	bzero((void *)PADDR_TO_KVADDR(swap_coremap->pa),PAGE_SIZE);
	swap_coremap->timestamp = localtime;
	localtime++;
	swap_coremap->pages=1;
	if(flag){
		//We are swapping to allocate a kernel page.
		swap_coremap->status = 1;
		swap_coremap->as=NULL;
	}
	else{
		//We are swapping to allocate user page
		swap_coremap->status = 0 ;
		swap_coremap->as=curthread->t_addrspace;
	}
	//swap_coremap->swap = 0;
	return swap_coremap->pa;
}
/*
 * vm_bootstrap - build the coremap as a linked list in the first physical
 * pages and assign each entry the physical page it tracks.
 *
 * The 192/445/950 thresholds reserve a few pages for the coremap nodes
 * themselves (NOTE(review): empirically tuned constants — confirm they match
 * sizeof(struct coremap) for the configured RAM sizes).
 */
void vm_bootstrap() {
	paddr_t firstaddr, lastaddr,freeaddr;
	ram_getsize(&firstaddr, &lastaddr);
	int total_page_num = ((lastaddr - firstaddr) / PAGE_SIZE) ;
	/* Reserve slack for the coremap nodes depending on RAM size. */
	if(total_page_num >= 192 && total_page_num<445) total_page_num-=1;
	else if(total_page_num >= 445 && total_page_num<950) total_page_num-=3;
	else if(total_page_num >= 950) total_page_num-=6;
	/* Coremap nodes live at the start of stolen RAM, accessed via KSEG0. */
	coremap_list = (struct coremap*)PADDR_TO_KVADDR(firstaddr);
	struct coremap *head = coremap_list ;
	vm_initialized = 0 ;
	int page_num = 0 ;
	/* Link the nodes; 0x6 marks free+clean (NOTE(review): confirm bits). */
	for (page_num = 1 ; page_num < total_page_num ; page_num++ ) {
		freeaddr = firstaddr + page_num * sizeof(struct coremap);
		coremap_list->next = (struct coremap*)PADDR_TO_KVADDR(freeaddr);
		coremap_list->status = 0x6 ;
		coremap_list->timestamp = 0 ;
		coremap_list->as = NULL ;
		coremap_list->swap = 0;
		coremap_list = coremap_list->next ;
	}
	/* Terminal node gets a distinct status. */
	coremap_list->next = NULL;
	coremap_list->status = 0x10 ;
	coremap_list->timestamp = 0 ;
	coremap_list=head;
	/* First managed frame: page-align past the coremap nodes, then skip one
	 * page (0x1000 == PAGE_SIZE). */
	freeaddr = firstaddr + page_num * sizeof(struct coremap);
	paddr_t page_start = freeaddr & 0xfffff000 ;
	page_start = page_start + 0x1000 ;
	/* NOTE(review): this loop stops at the terminal node, so the last entry
	 * never receives pa/va — confirm that is intentional. */
	while(coremap_list->next != NULL) {
		coremap_list->pa = page_start ;
		coremap_list->va = PADDR_TO_KVADDR(page_start);
		page_start = page_start + 0x1000 ;
		coremap_list = coremap_list->next ;
	}
	coremap_list->next=NULL;
	coremap_list=head;
	vm_initialized = 1 ;
	//file_lock=lock_create("FileLock");
	coremap_lock = lock_create("CoremapLock");
}
/*
 * Copy the contents of one physical page to another through their
 * direct-mapped kernel virtual aliases. Both frames must already be
 * pinned by the caller, and source and destination must differ.
 */
void
coremap_copy_page(paddr_t oldpaddr, paddr_t newpaddr)
{
	KASSERT(oldpaddr != newpaddr);
	KASSERT(coremap_pageispinned(oldpaddr));
	KASSERT(coremap_pageispinned(newpaddr));

	vaddr_t src = PADDR_TO_KVADDR(oldpaddr);
	vaddr_t dst = PADDR_TO_KVADDR(newpaddr);
	memcpy((char *)dst, (char *)src, PAGE_SIZE);
}
/*
 * load_segment - read one ELF segment from vnode v into memory.
 *
 * Translates the segment's user vaddr to a physical frame via the address
 * space's segment table, then reads filesize bytes at the given file offset
 * directly into the frame through its KSEG0 alias (UIO_SYSSPACE), bypassing
 * user-space copyin/out. Returns 0 on success or an errno from the
 * translation helpers / VOP_READ.
 */
static int
load_segment(struct addrspace *as, struct vnode *v, off_t offset, vaddr_t vaddr,
	     size_t memsize, size_t filesize, int is_executable)
{
	/* Segment must be a user-space address (below KSEG0). */
	KASSERT(vaddr < 0x80000000);
	DEBUG(DB_VM, "Running load segment %p, size %d\n", (void*)vaddr, memsize);
	struct iovec iov;
	struct uio u;
	int result;
	if (filesize > memsize) {
		DEBUG(DB_VM,"ELF: warning: segment filesize > segment memsize\n");
		filesize = memsize;
	}
	DEBUG(DB_EXEC, "ELF: Loading %lu bytes to 0x%lx\n", (unsigned long) filesize, (unsigned long) vaddr);
	/* Map the user vaddr to the physical frame backing it. */
	struct segment *seg;
	paddr_t kpaddr;
	result = as_which_seg(as, vaddr, &seg);
	if (result) return result;
	result = seg_translate(seg, vaddr, &kpaddr);
	if (result) return result;
	(void) is_executable;
	/* Read straight into the frame via its kernel alias. */
	iov.iov_ubase = (userptr_t)PADDR_TO_KVADDR(kpaddr);
	iov.iov_len = memsize; // length of the memory space
	u.uio_iov = &iov;
	u.uio_iovcnt = 1;
	u.uio_resid = filesize; // amount to read from the file
	u.uio_offset = offset;
	u.uio_segflg = UIO_SYSSPACE;// is_executable ? UIO_USERISPACE : UIO_USERSPACE;
	u.uio_rw = UIO_READ;
	u.uio_space = NULL;
	DEBUG(DB_VM,"ELF loading segment into %p(%p) as %p\n", (void*)PADDR_TO_KVADDR(kpaddr), (void*) kpaddr,(void*)vaddr);
	result = VOP_READ(v, &u);
	if (result) {
		return result;
	}
	DEBUG(DB_VM, "\tDone loading\n");
	return result;
}
// Allocate/free user-level page paddr_t coremap_ualloc(struct page* p) { struct cm_entry* cm_begin = (struct cm_entry*)PADDR_TO_KVADDR(coremap); spinlock_acquire(&cm_lock); //int before = coremap_ucount(); int i; for(i=0;i<(int)coremap_size;i++) { if(cm_begin[i].in_use==0) { cm_begin[i].is_kern = 0; cm_begin[i].in_use = 1; cm_begin[i].pg = p; free_mem_pages--; spinlock_release(&cm_lock); return ram_start+(i*PAGE_SIZE); } } //int after = coremap_ucount(); //KASSERT(before+1 == after); spinlock_release(&cm_lock); return 0; // no swapping yet, so just return 0 for failure }
/*
 * Allocate npages of kernel virtual memory.
 *
 * Before the VM system is up, pages come straight from ram_stealmem via
 * getppages(); afterwards, single pages go through page_alloc() (which may
 * swap) and multi-page requests through alloc_npages(). Panics if no
 * physical memory can be found. Returns the KSEG0 virtual address.
 */
vaddr_t
alloc_kpages(int npages)
{
	paddr_t pa;

	if (!vm_initialized) {
		/* Early boot: steal raw pages. */
		pa = getppages(npages);
		if (pa == 0) {
			panic("getppages could not find an empty page\n");
			return 0;
		}
		return PADDR_TO_KVADDR(pa);
	}

	pa = (npages == 1) ? page_alloc() : alloc_npages(npages);
	if (pa == 0) {
		panic("alloc_npages could not find an empty page\n");
		return 0;
	}
	return PADDR_TO_KVADDR(pa);
}
/*
 * page_alloc - allocate one physical page for kernel use.
 *
 * Scans the coremap for a free entry (status bit 0x2 set); if none is
 * found, evicts a user page via swap_out(1). The returned frame is zeroed
 * and marked fixed (status 1). Takes and releases coremap_lock; swap_out
 * runs while the lock is held.
 */
paddr_t page_alloc() {
	struct coremap *local_coremap = coremap_list ;
	lock_acquire(coremap_lock);
	/* NOTE(review): the scan stops at the terminal node, so the last
	 * coremap entry is never considered — confirm intentional. */
	while(local_coremap->next != NULL) {
		if((local_coremap->status & 0x2) == 2 ) {
			/* Free entry: stamp it with the LRU counter and claim it. */
			local_coremap->timestamp = localtime ;
			localtime++ ;
			local_coremap->pages=1;
			local_coremap->status = 1 ;
			local_coremap->as = NULL ;
			bzero((void *)PADDR_TO_KVADDR(local_coremap->pa),PAGE_SIZE);
			lock_release(coremap_lock);
			return local_coremap->pa ;
		}
		local_coremap = local_coremap->next ;
	}
	/* No free page: evict one (flag=1 => kernel page). */
	paddr_t pa = 0 ;
	pa = swap_out(1) ;
	lock_release(coremap_lock);
	return pa ;
}
/*
 * swaponepageout - transfer one page between the swap file and memory.
 *
 * NOTE(review): despite the name and the "write them to disk" comment, this
 * uses UIO_READ/VOP_READ — i.e. it READS the swap slot indexed by
 * pg->pt_pagebase into the frame at phyaddr, then frees the swap slot and
 * marks the page mapped at phyaddr. That is swap-IN semantics; confirm
 * against the caller whether the name or the direction is wrong.
 */
static void swaponepageout(struct page* pg, paddr_t phyaddr) {
	/* pt_pagebase currently holds the swap-map slot index. */
	int swapPageindex = pg->pt_pagebase;
	struct iovec iov;
	struct uio kuio;
	iov.iov_kbase = (void*) PADDR_TO_KVADDR(phyaddr);
	iov.iov_len = PAGE_SIZE; // length of the memory space
	kuio.uio_iov = &iov;
	kuio.uio_iovcnt = 1;
	kuio.uio_resid = PAGE_SIZE; // amount to transfer
	kuio.uio_space = NULL;
	/* File offset of the swap slot (se_paddr is a page index here —
	 * NOTE(review): confirm field semantics). */
	kuio.uio_offset = swap_map[swapPageindex].se_paddr * PAGE_SIZE;
	kuio.uio_segflg = UIO_SYSSPACE;
	kuio.uio_rw = UIO_READ;
	int result = VOP_READ(swap_vnode, &kuio);
	if (result) {
		// release lock on the vnode
		panic("READ FAILED!\n");
		return;
	}
	/* Slot is free again; page now lives in RAM at phyaddr. */
	swap_map[swapPageindex].se_used = SWAP_PAGE_FREE;
	kprintf("Swap out:\tswap= %x,\tpage=%x \n",swapPageindex,pg->pt_virtbase);
	pg->pt_state = PT_STATE_MAPPED;
	pg->pt_pagebase = phyaddr / PAGE_SIZE;
}
/*
 * coremap_init - initialize the coremap entry array.
 *
 * num_pages: number of manageable physical pages; mem_start: first managed
 * physical address; cm_start: physical address where the cm_entry array
 * itself lives (accessed through KSEG0). Runs with interrupts disabled.
 *
 * NOTE(review): coremap_size/free_mem_pages are set to num_pages-1 while
 * the loop initializes num_pages entries, and reserved_page is a BY-VALUE
 * copy of c[num_pages-1] (later writes to the array won't be reflected in
 * it) — confirm both are intentional.
 */
void coremap_init(int num_pages, paddr_t mem_start, paddr_t cm_start) {
	int spl = splhigh();
	spinlock_init(&cm_lock); // initialize coremap lock
	coremap_size = num_pages-1;
	ram_start = mem_start;
	coremap = cm_start;
	struct cm_entry* c = (struct cm_entry*)PADDR_TO_KVADDR(coremap); // the initial coremap entry will be at the start address of the coremap
	int i;
	//spinlock_acquire(&cm_lock);
	/* Every entry starts free, unowned, and non-contiguous. */
	for (i=0; i<num_pages; i++) {
		c[i].is_kern = 0;
		c[i].in_use = 0;
		c[i].pg = NULL;
		c[i].contig_pages = 0;
	}
	//spinlock_release(&cm_lock);
	free_mem_pages = num_pages-1;
	reserved_page = c[num_pages-1];
	splx(spl);
}
/*
 * Swapin()
 * -----------------------
 * Read the page stored at swap-area offset `chunk` back into the physical
 * frame `paddr`.
 *
 * 1. Sanity checks: We can't swap the pages holding the page table itself.
 *    So, check if the paddr lies outside of the coremap or not.
 * 2. We use mk_kuio to initiate a read from disk to physical memory.
 * 3. Remove the mapping of the page in the swaparea and unmark the swapmap
 *    bitmap (done with interrupts off so the bookkeeping stays consistent).
 * 4. Read the page in from disk (interrupts re-enabled for the blocking I/O).
 */
void swapin(u_int32_t paddr, u_int32_t chunk) {
	/*
	 * sanity check: make sure that we are not touching kernel space or the
	 * page table itself. That is, the page should be within the coremap
	 * memory starting from coremap_base.
	 */
	assert(paddr >= coremap_base);
	int spl=splhigh();
	/*
	 * Initialize the read I/O into a kernel buffer of size PAGE_SIZE
	 * starting from paddr (masked to its page frame), reading from the
	 * swap area at the offset indexed by chunk.
	 */
	struct uio swap_uio;
	mk_kuio(&swap_uio,
		/*kernel buffer*/(void*)PADDR_TO_KVADDR(paddr & PAGE_FRAME),
		/*Size of the buffer to read into*/PAGE_SIZE,
		/*Starting offset of the swap area for read out */chunk,
		UIO_READ);
	/*
	 * Remove the mapping of the chunk to page in the swaparea and unmark
	 * the swap_memmap bitmap to free the chunk.
	 */
	remove_spage(chunk);
	splx(spl);
	/* Now read the page from disk into the buffer aliasing paddr. */
	int result=VOP_READ(swap_fp, &swap_uio);
	if(result)
		panic("VM: SWAPIN Failed");
}
/*
 * allocate_nonfixed_page - bind coremap slot page_num to a user mapping.
 *
 * Marks the (FREE) frame LOCKED, records its owner address space and user
 * virtual address, installs a PTE (with the given permission bits) in the
 * page table returned by pgdir_walk (created if absent), zeroes the frame,
 * and decrements the free-page count.
 *
 * NOTE(review): the PTE stores the frame's KSEG0 alias
 * (PADDR_TO_KVADDR(pa)) rather than the raw paddr — confirm
 * PAGEVA_TO_PTE expects that.
 */
static void allocate_nonfixed_page(size_t page_num, struct addrspace *as, vaddr_t va, int permissions) {
	// KASSERT(spinlock_do_i_hold(&stealmem_lock));
	KASSERT(core_map[page_num].state == FREE);
	//Allocate a page
	core_map[page_num].state = LOCKED;
	/* Frame index determines physical address directly. */
	paddr_t pa = page_num * PAGE_SIZE;
	core_map[page_num].pa = pa;
	core_map[page_num].va = va;
	core_map[page_num].as = as;
	//Get the page table for the virtual address (create it if needed).
	struct page_table *pt = pgdir_walk(as,va,true);
	KASSERT(pt != NULL);
	KASSERT(pt != 0x0);
	//Update the page table entry to point to the page we made.
	size_t pt_index = VA_TO_PT_INDEX(va);
	vaddr_t page_location = PADDR_TO_KVADDR(core_map[page_num].pa);
	pt->table[pt_index] = PAGEVA_TO_PTE(page_location);
	// DEBUG(DB_VM, "VA:%p\n", (void*) va);
	// DEBUG(DB_VM, "PTE:%p\n", (void*) pt->table[pt_index]);
	// DEBUG(DB_VM, "PFN:%p\n", (void*) PTE_TO_PFN(pt->table[pt_index]));
	//Add in permissions
	pt->table[pt_index] |= permissions;
	zero_page(page_num);
	free_pages--;
	// DEBUG(DB_VM, "A:%d\n",free_pages);
}
/*
 * free_kpages - free a previously-allocated block of pages by kernel VA.
 *
 * Rejects direct-mapped (sub-KSEG0) addresses, then linearly searches the
 * coremap for the entry whose frame aliases `addr` and frees that entry
 * plus the following npages-1 entries recorded at allocation time.
 * Panics if the address matches no coremap entry.
 */
void free_kpages(vaddr_t addr) {
	/* User-space / direct-mapped addresses are not valid here. */
	if(addr < 0x80000000) {
		panic("Tried to free a direct-mapped address\n");
	}
	// bool lock = get_coremap_lock();
	/* Disable interrupts on this CPU while frobbing the TLB. */
	//int spl = splhigh();
	//bool lock = get_coremap_lock();
	// kprintf("Freeing VA:%p\n", (void*) addr);
	KASSERT(page_count > 0);
	/* Linear scan: match addr against each frame's KSEG0 alias. */
	for(size_t i = 0;i<page_count;i++) {
		vaddr_t page_location = PADDR_TO_KVADDR(core_map[i].pa);
		// DEBUG(DB_VM, "Page locaion:%p\n", (void*) page_location);
		if(addr == page_location) {
			// size_t target = i + core_map[i].npages;
			/* Free the whole contiguous allocation starting here. */
			for(size_t j = i; j<i+core_map[i].npages;j++) {
				// DEBUG(DB_SWAP, "FREE %p\n",&core_map[j]);
				free_fixed_page(j);
			}
			// release_coremap_lock(lock);
			//release_coremap_lock(lock);
			//splx(spl);
			return;
		}
	}
	panic("VA Doesn't exist!");
}
/*
 * Zero the physical frame tracked by core_map[page_num], writing through
 * its direct-mapped kernel virtual alias. Valid for KVAs only.
 */
static void zero_page(size_t page_num) {
	void *kva = (void *)PADDR_TO_KVADDR(core_map[page_num].pa);
	memset(kva, 0, PAGE_SIZE);
}
/*
 * Report the range of physical memory available for the VM system to
 * manage, then zero the bookkeeping globals: after this call the VM
 * system owns that memory and ram_stealmem must not be used again.
 */
void
ram_getsize(u_int32_t *lo, u_int32_t *hi)
{
	*lo = firstpaddr;
	*hi = lastpaddr;
#if OPT_A3
	/* Remember where VM-managed memory begins, as a KSEG0 address. */
	first_vm_addr = PADDR_TO_KVADDR(firstpaddr);
#endif
	/* Ownership transferred: invalidate the stealmem range. */
	firstpaddr = 0;
	lastpaddr = 0;
}
/*
 * kmalloc backend: grab npages of physical memory and hand back the
 * corresponding kernel virtual address, or 0 when memory is exhausted.
 */
vaddr_t
alloc_kpages(unsigned npages)
{
	paddr_t frame = getppages(npages);
	return (frame == 0) ? 0 : PADDR_TO_KVADDR(frame);
}
/*
 * Zero one physical page through its kernel virtual alias.
 * The frame must already be pinned by the caller.
 */
void
coremap_zero_page(paddr_t paddr)
{
	KASSERT(coremap_pageispinned(paddr));
	bzero((char *)PADDR_TO_KVADDR(paddr), PAGE_SIZE);
}
/*
 * Allocate npages of contiguous kernel-space virtual memory.
 * Returns the KSEG0 address of the first page, or 0 on failure.
 */
vaddr_t
alloc_kpages(int npages)
{
	paddr_t frame = getppages(npages);
	if (frame == 0) {
		return 0;
	}
	return PADDR_TO_KVADDR(frame);
}
/*
 * free_kpages - release the allocation whose first page aliases `addr`.
 *
 * For user addresses (addr <= USERTOP) the match also requires the entry's
 * PID to equal the current process's, so one process cannot free another's
 * pages; kernel addresses match on the frame alias alone. The entry's len
 * field records the allocation length (-1 marks an already-freed run).
 * Panics if no matching entry exists.
 */
void free_kpages(vaddr_t addr) {
	lock_acquire(core_lock);
	int i =0;
	if(!(addr>USERTOP)){
		/* User address: match both frame alias and owning PID. */
		for(i = 0; i< coremap_size;i++){
			assert(curthread != NULL);
			assert(curthread->t_process != NULL);
			if(PADDR_TO_KVADDR(coremap[i].paddr) == addr && coremap[i].pid == curthread->t_process->PID){
				break;
			}
		}
	}
	else{
		/* Kernel address: match on frame alias only. */
		for(i = 0; i< coremap_size;i++){
			if(PADDR_TO_KVADDR(coremap[i].paddr) == addr){
				break;
			}
		}
	}
	if(i >= coremap_size){
		panic("Couldn't find paddr matching vaddr needing to free\n");
	}
	/* len == -1 would mean a double free. */
	assert(coremap[i].len != -1);
	int len =coremap[i].len;
	coremap[i].len = -1;
	int z;
	/* Mark every page of the contiguous run unused. */
	for(z = 0; z < len;z++){
		//coremap[z+i].pid = -1;
		coremap[z+i].used = 0;
	}
	lock_release(core_lock);
}
/*
 * Allocate npages of contiguous kernel-space virtual memory.
 *
 * Returns the KSEG0 virtual address of the first page, or 0 if no
 * physical memory is available.
 *
 * Fix: the original had a panic() call after an unconditional return,
 * which was unreachable dead code; it has been removed.
 */
vaddr_t
alloc_kpages(int npages)
{
	paddr_t pa = getppages(npages);
	if (pa == 0) {
		return 0;
	}
	return PADDR_TO_KVADDR(pa);
}
paddr_t coremap_kalloc(unsigned long npages) { //kprintf("********************\nAllocating %x kpage(s)...\n",(int)npages); KASSERT((long)npages>0); paddr_t adr = 0; // initial value, will be changed if page finding is successful struct cm_entry* cm_begin = (struct cm_entry*)PADDR_TO_KVADDR(coremap); spinlock_acquire(&cm_lock); // lock the coremap //int before = coremap_kcount(); if (free_mem_pages == 0) panic("We is out of pages :'-(...\n"); // don't need to release spinlock, as we are already screwed (for now) unsigned int i; for (i=0; i<=(coremap_size-npages); i++) { // search for the first page in the contiguous allocation unsigned int j; bool success = 1; for (j=i; j<(i+npages); j++) { // check the succeeding entries to see if they are free if (cm_begin[j].in_use) { success=0; // one of the pages is being used, so we must start allocating from a different initial page i = j; // set i to the page that was in use, so the next initial page is passed it (for efficiency) break; } } if (success) { cm_begin[i].contig_pages=npages; // mark how many pages we are allocating, used when freeing pages unsigned int k; for (k=i; k<(i+npages); k++) { cm_begin[k].is_kern = 1; // kernel pages should not be swapped cm_begin[k].in_use = 1; cm_begin[k].pg = NULL; // kernel pages do not map to page structs } adr = ram_start+(i*PAGE_SIZE); // the physical address of the first page in this allocation free_mem_pages -= npages; break; KASSERT(cm_begin[i].contig_pages>0); } } //int after = coremap_kcount(); //KASSERT(before+(int)npages == after); spinlock_release(&cm_lock); //KASSERT(adr>0); KASSERT(cm_begin[i].contig_pages>0); return adr; // return the physical address of our allocation, or 0 for OUT-OF-MEMORY error }
/*
 * Swapout()
 * -----------------------
 * Write the page at physical address `paddr` out to swap-area offset
 * `chunk`.
 *
 * 1. Sanity checks: We can't swap the pages holding the page table itself.
 *    So, check that paddr lies outside of the coremap region.
 * 2. We use mk_kuio to initiate a write to disk from physical memory.
 * 3. Insert the mapping of the page in the swaparea and mark the swapmap
 *    bitmap (under splhigh, before the write, to avoid inconsistency).
 * 4. Write the page out to disk.
 * 5. Invalidate the corresponding TLB entry.
 */
void swapout(u_int32_t chunk, u_int32_t paddr) {
	/*
	 * sanity check: make sure that we are not swapping out any kernel page
	 * or the address space containing the page table itself. That is, the
	 * page should be within the coremap memory starting from coremap_base.
	 */
	assert(paddr >= coremap_base);
	int spl=splhigh();
	/*
	 * Initialize the write I/O from a kernel buffer of size PAGE_SIZE
	 * starting at paddr (masked to its page frame), into the swap area at
	 * the offset indexed by chunk.
	 */
	struct uio swap_uio;
	mk_kuio(&swap_uio,
		/*kernel buffer*/(void*)PADDR_TO_KVADDR(paddr & PAGE_FRAME),
		/*Size of the buffer to writeout*/PAGE_SIZE,
		/*Starting offset of the swap area for write into */chunk,
		UIO_WRITE);
	/*
	 * Before the actual write, mark the swap area as not-empty to avoid
	 * inconsistency, and record the mapping from the page to this chunk.
	 */
	//get the physical page
	struct _PTE ppage = coremap[(paddr-coremap_base)/PAGE_SIZE];
	if (ppage.pid == 0) {
		panic("PID in swapout == 0!");
		//DEBUG(DB_VM, "\npid = 0 at coremap[%u] (paddr=0x%x).\n\n", (paddr-coremap_base)/PAGE_SIZE, paddr);
		//ppage.pid = curthread->pid; // Workaround by tocurtis
	}
	DEBUG(DB_VM, "Putting page at address 0x%x into swap area with pid %d.\n", ppage.vaddr, ppage.pid);
	//add and mark into swaparea
	add_spage(ppage.vaddr, chunk, ppage.pid);
	splx(spl);
	/*
	 * Now do the actual write of the page to disk. We could skip the write
	 * if the page is already on disk and not dirty; for now we always
	 * write (i.e. if already on disk we overwrite it).
	 */
	int result=VOP_WRITE(swap_fp, &swap_uio);
	if(result)
		panic("VM_SWAP_OUT: Failed");
	//Invalidate the corresponding TLB entry in the associative cache
	spl=splhigh();
	TLB_Invalidate(paddr);
	splx(spl);
}
/*
 * Bring a page back from swap: allocate a fresh user frame, then fill it
 * from the swap file at the given offset. Panics if the disk read fails.
 * Returns the physical address of the populated frame.
 */
paddr_t swap_in(off_t offset){
	paddr_t frame = user_page_alloc();
	if (read_from_disk(PADDR_TO_KVADDR(frame), offset)) {
		panic("\n swap failed \n") ;
	}
	return frame;
}
/*
 * swapper - dedicated kernel thread that evicts pages to disk on demand.
 *
 * Loop: sleep on needToEvicPage while free pages exist (waking allocators
 * via needMorePages); when free pages run out, pick a victim PTE via
 * CoreMapGetPageToSwap. Read-only pages are simply dropped (re-loadable);
 * writable pages are written to disk via ToDisk and their PTE repointed at
 * the 4KB-aligned disk location (frameAddr field reused as slot index,
 * swapped=1). Arguments o/l are unused thread-start parameters.
 *
 * NOTE(review): the closing brace of this function is missing from this
 * chunk — the definition appears truncated by extraction.
 */
static void swapper(void *o, unsigned long l) {
	(void)o;
	(void)l;
	int spl;
	int result = 0;
	int nFreePages = 0;
	struct addrspace* as;
	PageTableEntry *pte;
	paddr_t paddr;
	vaddr_t kvaddr;
	u_int32_t loc;
	do {
		lock_acquire(&vmlock);
		spl = splhigh();
		nFreePages = CoreMapReport();
		splx(spl);
		/* Nothing to do while memory is still available. */
		while (nFreePages > 0) {
			cv_broadcast(&needMorePages, &vmlock);
			cv_wait(&needToEvicPage, &vmlock);
			spl = splhigh();
			nFreePages = CoreMapReport();
			splx(spl);
		}
		// kprintf("put some pages to disk.\n");
		spl = splhigh();
		result = CoreMapGetPageToSwap(&as, &pte);
		if (!result) {
			assert(pte->valid == 1);
			if (pte->writable == 0) {
				/* Clean/read-only page: just free the frame. */
				paddr_t paddr = pte->frameAddr << 12;
				FreeNPages(paddr);
				pte->valid = 0;
			} else {
				/* Dirty page: write to disk, remember its slot. */
				paddr = pte->frameAddr << 12;
				kvaddr = PADDR_TO_KVADDR(paddr);
				loc = ToDisk(kvaddr);
				assert((loc & 0xfffff000) == loc);
				FreeNPages(paddr);
				pte->frameAddr = loc >> 12;
				pte->valid = 0;
				pte->swapped = 1;
			}
		}
		splx(spl);
		// lock the pagetable and coremap entry
		cv_broadcast(&needMorePages, &vmlock);
		lock_release(&vmlock);
	} while (1);
/* Count how many coremap entries are currently in use by the kernel. */
int coremap_kcount(void){
	struct cm_entry *entries = (struct cm_entry *)PADDR_TO_KVADDR(coremap);
	int kernel_pages = 0;
	int idx;

	for (idx = 0; idx < (int)coremap_size; idx++) {
		if (entries[idx].in_use && entries[idx].is_kern) {
			kernel_pages++;
		}
	}
	return kernel_pages;
}
/*
 * Free one user page by physical address: clear its coremap entry,
 * scrub the frame, and return it to the free pool.
 * (The `!= 31` assert is presumably a leftover debugging guard —
 * preserved as-is.)
 */
void coremap_ufree(paddr_t page_addr) {
	KASSERT(page_addr != 31);
	struct cm_entry *entries = (struct cm_entry *)PADDR_TO_KVADDR(coremap);
	int slot = (page_addr - ram_start) / PAGE_SIZE;

	spinlock_acquire(&cm_lock);
	entries[slot].in_use = 0;
	entries[slot].pg = NULL;
	coremap_clean_page(PADDR_TO_KVADDR(page_addr));
	free_mem_pages++;
	spinlock_release(&cm_lock);
}
/*
 * swap_kpages - free up npages CONTIGUOUS frames for a kernel allocation
 * by evicting the user pages currently occupying them.
 *
 * Pass 1 scans for a run of npages entries whose low status bits (free,
 * fixed) are both clear; pass 2 swaps each one out, marks it fixed, and
 * clears its addrspace. Returns the physical address of the first frame,
 * or panics if no run exists.
 *
 * NOTE(review): pass 2 sets status = 1 BEFORE testing (status & 0x4), so
 * that test always passes — confirm whether the 0x4 ("clean"?) check was
 * meant to use the pre-overwrite status. Also, if the page-table walk
 * finds no match, pt is NULL when pt->offset is read — confirm unreachable.
 */
paddr_t swap_kpages(int npages){
	paddr_t pa = 0;
	int count = 0;
	struct coremap *local_coremap = coremap_list;
	struct coremap *start = coremap_list;
	//First find sequence of user allocated pages. Kernel pages cannot be swapped out.
	lock_acquire(coremap_lock);
	while(local_coremap != NULL && count!=npages){
		if((local_coremap->status & 0x3) == 0){
			//Last two bits - free and fixed should be 0.
			if(count == 0)
				start = local_coremap;
			count++;
		}
		else{
			count = 0;
		}
		local_coremap = local_coremap->next;
	}
	if(count == npages){
		/* Pass 2: evict each page in the run and claim it as fixed. */
		local_coremap = start;
		count = 0;
		while(count!=npages){
			local_coremap->timestamp = localtime;
			localtime++;
			local_coremap->pages = 0;
			local_coremap->status = 1;
			if((local_coremap->status & 0x4) == 0){
				/* Locate the PTE backing this frame and write it out. */
				struct addrspace *temp = local_coremap->as;
				struct page_table_entry *pt = temp->page_table;
				while(pt!=NULL){
					if(pt->pa == local_coremap->pa)
						break;
					pt=pt->next;
				}
				off_t off = write_to_swap(PADDR_TO_KVADDR(local_coremap->pa),pt->offset) ;
				if(off == -10)
					panic("swap Kpages : \n");
				update_pagetable_entry(local_coremap,off);
			}
			//bzero((void *)PADDR_TO_KVADDR(local_coremap->pa),PAGE_SIZE);
			local_coremap->as = NULL;
			local_coremap = local_coremap->next;
			count++;
		}
		/* First entry records the run length, like other allocations. */
		start->pages = count;
		lock_release(coremap_lock);
		return start->pa;
	}
	lock_release(coremap_lock);
	panic("swap_kpages : Could not swap out %d pages\n",npages);
	return pa;
}
/*
 * page_nalloc - find and allocate npages contiguous FREE frames.
 *
 * Called by alloc_kpages. Scans the coremap tracking the current run of
 * FREE entries; when the run reaches npages, fixes each page, records the
 * run length in the first entry, and returns its KSEG0 address. With
 * swapping enabled, first asks make_pages_available to create room.
 *
 * NOTE(review): the panic path exits with the coremap lock held and
 * interrupts still raised (spl not restored) — harmless only because
 * panic() never returns.
 */
static vaddr_t page_nalloc(int npages) {
	bool lock = get_coremap_lock();
	//KASSERT(spinlock_do_i_hold(&stealmem_lock));
	bool blockStarted = false;
	int pagesFound = 0;
	int startingPage = 0;
#ifdef SWAPPING_ENABLED
	//Make a page available for allocation, if needed.
	make_pages_available(npages,false);
#endif
	int spl = splhigh();
	/* Track the current run of consecutive FREE frames. */
	for(size_t i = 0;i<page_count;i++) {
		if(!blockStarted && core_map[i].state == FREE) {
			blockStarted = true;
			pagesFound = 1;
			startingPage = i;
		}
		else if(blockStarted && core_map[i].state != FREE) {
			/* Run broken; restart the search. */
			blockStarted = false;
			pagesFound = 0;
		}
		else if(blockStarted && core_map[i].state == FREE) {
			pagesFound++;
		}
		if(pagesFound == npages) {
			// DEBUG(DB_SWAP, "Getting %d npages %d-%d for FIXED\n",npages,startingPage,startingPage+npages-1);
			//KASSERT(spinlock_do_i_hold(&stealmem_lock));
			//Allocate the block of pages, now.
			for(int j = startingPage; j<startingPage + npages; j++) {
				allocate_fixed_page(j);
			}
			/* First entry remembers the run length for free_kpages. */
			core_map[startingPage].npages = npages;
			release_coremap_lock(lock);
			splx(spl);
			return PADDR_TO_KVADDR(core_map[startingPage].pa);
		}
	}
	panic("Couldn't find a big enough chunk for npages!");
	return 0x0;
}
/*
 * vm_bootstrap - carve the coremap array out of the start of free RAM.
 *
 * The first coremap_size pages (those holding the page structs themselves)
 * are marked state 1 (fixed); the rest are free. Each entry records the
 * physical address of its frame, offset from freeaddr (the page-rounded
 * end of the coremap array), and its KSEG0 alias.
 *
 * NOTE(review): page_num counts pages of the ORIGINAL range but frames are
 * laid out from freeaddr, so the last few entries index past lastaddr —
 * confirm against ram_getsize semantics.
 */
void vm_bootstrap(void) {
	int page_num, coremap_size;
	paddr_t coremapaddr, temp;
	ram_getsize(&firstaddr, &lastaddr);
	page_num = (lastaddr-firstaddr) / PAGE_SIZE;
	/* End of the coremap array, rounded up to a page boundary. */
	freeaddr = firstaddr + page_num * sizeof(struct page);
	freeaddr = ROUNDUP(freeaddr, PAGE_SIZE);// added for pr->nfree error
	coremap = (struct page*)PADDR_TO_KVADDR(firstaddr);
	coremapaddr = freeaddr - firstaddr;
	coremap_size = ROUNDUP(coremapaddr, PAGE_SIZE)/PAGE_SIZE;
	pages_in_coremap=page_num;
	for (int i=0;i<page_num;i++){
		if(i<coremap_size){
			/* Pages backing the coremap itself: permanently fixed. */
			coremap[i].state = 1;
		}else{
			coremap[i].state = 0;
			coremap[i].contained=false;
		}
		temp = PAGE_SIZE * i + freeaddr;
		coremap[i].pa = temp;
		coremap[i].va = PADDR_TO_KVADDR(temp);
	}
	/* From here on, allocations go through the coremap, not stealmem. */
	beforeVM = false;
}
//---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- void vm_bootstrap(void) { /* Do nothing. */ //kprintf("In vm_bootstrap: Done_vm_bootstrap = %d\n",done_vm_bootstrap); paddr_t firstpaddress, lastpaddress,freepaddress; ram_getsize(&firstpaddress, &lastpaddress); //ram_getsize in ram.c //lock_alloc_kpages = sem_create("lock_alloc_kpages",1); //lock for now //kprintf("before adjusting firstpaddress is %d\n",firstpaddress); while((firstpaddress % 4096) != 0) { firstpaddress++; } //kprintf("adjusting firstpaddress is %d\n",firstpaddress); total_pages = (lastpaddress - firstpaddress) / (PAGE_SIZE); //kprintf("Total number of free pages in physical memory: %d\n",total_pages); //initialzing coremap entries int i = 0; for(i = 0; i < total_pages; i++) { //putting in coremap entry details BUT DIDNT SET VIRTUAL ADDRESS YET!!! my_coremap[i].physical_address =firstpaddress + i*PAGE_SIZE; my_coremap[i].virtual_address =PADDR_TO_KVADDR(firstpaddress + i*PAGE_SIZE); my_coremap[i].state = FREE; my_coremap[i].num_continuous_pages = 0; } //Printing out coremap info for (i = 0; i < total_pages; i++) //set to 51 for now as too many entries if RAM big { //kprintf("page entry: %d physical adress: %u virtual address: %u state: %d\n", i, my_coremap[i].physical_address, my_coremap[i].virtual_address, my_coremap[i].state); } done_vm_bootstrap = 1; //kprintf("In vm_bootstrap: Done_vm_bootstrap = %d\n",done_vm_bootstrap); //splx(spl);//not in code return; }
/*
 * alloc_kpages
 *
 * Allocate some kernel-space virtual pages; this is the interface
 * kmalloc uses to get pages for its use.
 *
 * Single-page requests go through coremap_alloc_one_page (no owner,
 * unpinned); larger requests through coremap_alloc_multipages.
 *
 * Synchronization: takes coremap_spinlock. May block to swap pages out.
 * Returns the KSEG0 address of the allocation, or 0 on failure.
 */
vaddr_t
alloc_kpages(int npages)
{
	paddr_t pa = (npages > 1)
		? coremap_alloc_multipages(npages)
		: coremap_alloc_one_page(NULL, 0 /* dopin */);

	if (pa == INVALID_PADDR) {
		return 0;
	}
	return PADDR_TO_KVADDR(pa);
}