// Select a victim page for eviction: repeatedly find the in-RAM page with
// the smallest creation time, giving it a second chance if its PTE_A
// (accessed) bit is set. Returns the chosen page; panics if none qualify.
struct pg* pick_page(void) {
  struct pg* it = proc->pg_data.pgs;  // NOTE(review): dead store — overwritten at the top of the loop below
  struct pg dummy_min;
  struct pg* min = &dummy_min;        // sentinel; real candidates compare below it
  pde_t* pe;
  // NOTE(review): INFINITY is a floating-point constant; if ctime is an
  // integer this relies on implicit conversion saturating high — confirm.
  dummy_min.ctime = INFINITY;
  for(;;) {
    // NOTE(review): skips the first 3 entries on every pass — presumably
    // reserved/pinned pages; verify against pg_data's layout.
    it = proc->pg_data.pgs + 3;
    while(it != END) {
      if(it->state == RAM)
        min = min->ctime > it->ctime ? it : min;
      ++it;
    }
    if(min == &dummy_min)
      panic("could not choose page");
    // check the PTE_A thing
    // NOTE(review): `min` is not reset between passes, so after clearing
    // PTE_A the same page remains the candidate on the next scan unless a
    // strictly older page exists — confirm this is the intended
    // second-chance behavior.
    pe = walkpgdir(proc->pgdir, (void*) min->id, 0);
    if (*pe & PTE_A)   // accesses
      *pe &= (~PTE_A); // turn it off
    else
      break;           // found a fifo..
  }
  return min;
}
// Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz) { pde_t *d; pte_t *pte; uint pa, i, flags; char *mem; if((d = setupkvm()) == 0) return 0; for(i = 0; i < sz; i += PGSIZE){ if((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); flags = PTE_FLAGS(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)p2v(pa), PGSIZE); if(mappages(d, (void*)i, PGSIZE, v2p(mem), flags) < 0) goto bad; } return d; bad: freevm(d); return 0; }
// Return the low 12 permission/flag bits of the PTE that maps addr, or 0
// when no PTE exists for that address.
int get_pte_permissions(pde_t* pgdir, void* addr) {
  pte_t* pte = walkpgdir(pgdir, addr, 0);

  return (pte == 0) ? 0 : (int)(*pte & 0xFFF);
}
// Deallocate user pages to bring the process size from oldsz to // newsz. oldsz and newsz need not be page-aligned, nor does newsz // need to be less than oldsz. oldsz can be larger than the actual // process size. Returns the new process size. int deallocuvm(pde_t *pgdir, uint oldsz, uint newsz) { pte_t *pte; uint a, pa; if(newsz >= oldsz) return oldsz; a = PGROUNDUP(newsz); for(; a < oldsz; a += PGSIZE){ pte = walkpgdir(pgdir, (char*)a, 0); if(!pte) a += (NPTENTRIES - 1) * PGSIZE; else if((*pte & PTE_P) != 0){ pa = PTE_ADDR(*pte); if(pa == 0) panic("kfree"); char *v = p2v(pa); kfree(v); *pte = 0; } } return newsz; }
// Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz) { pde_t *d = setupkvm(); pte_t *pte; uint pa, i; char *mem; if(!d) return 0; for(i = 0; i < sz; i += PGSIZE){ if(!(pte = walkpgdir(pgdir, (void *)i, 0))) panic("copyuvm: pte should exist\n"); if(!(*pte & PTE_P)) panic("copyuvm: page not present\n"); pa = PTE_ADDR(*pte); if(!(mem = kalloc())) goto bad; memmove(mem, (char *)pa, PGSIZE); if(!mappages(d, (void *)i, PGSIZE, PADDR(mem), PTE_W|PTE_U)) goto bad; } return d; bad: freevm(d); return 0; }
// Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz) { pde_t *d; pte_t *pte; uint pa, i; char *mem; if((d = setupkvm()) == 0) return 0; // cs537 for(i = PGSIZE; i < sz; i += PGSIZE){ if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; } // cs537 /* copy stack region */ for(i = proc->sb; i < USERTOP ; i += PGSIZE){ if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; } return d; bad: freevm(d); return 0; }
// TODO(byan23): Copy the stack at the end of addr space. // Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz) { pde_t *d; pte_t *pte; uint pa, i; char *mem; if((d = setupkvm()) == 0) return 0; // Copy code + heap. if (proc->pid == 1) i = 0; else i = PGSIZE; for(; i < sz; i += PGSIZE){ if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; } //cprintf("before coping stack from pid %d.\n", proc->pid); // TODO(byan23): Copy more stack as it grows. // Copy stack. i = USERTOP - proc->ssz; //i = USERTOP - PGSIZE; if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; return d; bad: freevm(d); return 0; }
// Return the physical address that a given user address // maps to. The result is also a kernel logical address, // since the kernel maps the physical memory allocated to user // processes directly. char* uva2ka(pde_t *pgdir, char *uva) { pte_t *pte = walkpgdir(pgdir, uva, 0); if(pte == 0) return 0; uint pa = PTE_ADDR(*pte); return (char *)pa; }
// Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz, uint s_sz) { pde_t *d; pte_t *pte; uint pa, i, stack_size; char *mem; if((d = setupkvm()) == 0) return 0; // Start at PGSIZE to make the first page invalid for(i = PGSIZE; i < sz; i += PGSIZE){ if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; } // We need to loop again to copy the stack over stack_size = s_sz; for(i = stack_size; i < USERTOP; i+= PGSIZE){ if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; } return d; bad: freevm(d); return 0; }
// Clear PTE_U on a page. Used to create an inaccessible // page beneath the user stack. void clearpteu(pde_t *pgdir, char *uva) { pte_t *pte; pte = walkpgdir(pgdir, uva, 0); if (pte == 0) panic("clearpteu"); *pte &= ~PTE_U; }
int do_munprotect(struct proc*, void addr){ pte_t *pte = walkpgdir(p->pgdir, addr,0); if (pte == 0) { return -1; } *pte |= PTW_W; return 0; }
// Overwrite the low 12 flag/permission bits of the PTE mapping addr with
// perm, leaving the frame address untouched. Returns 1 on success, 0
// when no PTE exists for addr.
int set_pte_permissions(pde_t* pgdir, void* addr, uint perm) {
  pte_t* pte = walkpgdir(pgdir, addr, 0);

  if (pte == 0)
    return 0;
  // Keep the physical-address bits, replace only the permission bits.
  uint frame_bits = *pte & ~0xFFF;
  *pte = frame_bits | (perm & 0xFFF);
  return 1;
}
// Clear PTE_U on a page. Used to create an inaccessible // page beneath the user stack. void clearpteu(pde_t *pgdir, char *uva) { pte_t *pte; pte = walkpgdir(pgdir, uva, UVMPDXATTR, 0); if(pte == 0) panic("clearpteu"); *pte &= ~PTX_AP(U_AP); }
//PAGEBREAK! // Map user virtual address to kernel address. char* uva2ka(pde_t *pgdir, char *uva) { pte_t *pte; pte = walkpgdir(pgdir, uva, 0); if ((*pte & PTE_P) == 0) return 0; if ((*pte & PTE_U) == 0) return 0; return (char*) p2v(PTE_ADDR(*pte)); }
//Wille return 0 if error, 1 if success int check_page_fault(pde_t *pgdir, uint va) { pte_t *pte; uint pa; char *mem; //check if exists, and allowed by user if(va >= KERNBASE || va < 4096) { cprintf("Kernel or Null memory access\n"); return 0; } if((pte = walkpgdir(pgdir, (void *)va, 0)) == 0) { cprintf("memory access not in page dir\n"); return 0; } if( (!(*pte & PTE_P)) || (!(*pte & PTE_U)) ) { cprintf("memory access not for users\n"); return 0; } if( !(*pte & PTE_COW)) { cprintf("No cow bit, writing to read only mem\n"); return 0; } if( *pte & PTE_W) { cprintf("Writing other processes mem, error\n"); return 0; } pa = PTE_ADDR(*pte); //CHANGE: update reference counts acquire(&r_c.lock); if(r_c.ref_count[pa / 4096] == 1) { *pte = *pte | PTE_W; *pte = *pte & (~PTE_COW); release(&r_c.lock); //flush translation lookaside buffer flushtlb(); return 1; } else { r_c.ref_count[pa / 4096]--; release(&r_c.lock); if((mem = kalloc()) == 0) { return 0; } memmove(mem, (char*)p2v(pa), PGSIZE); *pte = v2p(mem) | PTE_FLAGS(*pte) | PTE_W; *pte = *pte & (~PTE_COW); acquire(&r_c.lock); r_c.ref_count[v2p(mem) / 4096] = 1; release(&r_c.lock); //flush translation lookaside buffer flushtlb(); return 1; } }
// Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz, uint stack_addr) { pde_t *d; pte_t *pte; uint pa, i; char *mem; if((d = setupkvm()) == 0) return 0; for(i = PGSIZE; i < sz; i += PGSIZE){ if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; } // Copy the last page which is the new stack if((pte = walkpgdir(pgdir, (void*)(USERTOP-PGSIZE), 1)) == 0)//undestand what pgdir does panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); //cprintf(" last page is %d\n",pte); pa = PTE_ADDR(*pte);// what pa are they getting from pte if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); // copying some stuff.. figure out what 1 page is and copying it if(mappages(d, (void*)USERTOP-PGSIZE, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0)//mapping into new address space and making it valid goto bad; return d; bad: freevm(d); return 0; }
//PAGEBREAK! // Map user virtual address to kernel address. char* uva2ka(pde_t *pgdir, char *uva) { pte_t *pte; pte = walkpgdir(pgdir, uva, UVMPDXATTR, 0); if((uint)*pte == 0) return 0; if(((uint)*pte & PTX_AP(U_AP)) == 0) return 0; return (char*)p2v(PTE_ADDR(*pte)); }
// Write every swappable user page of p out to its per-process swap file,
// then free the frames and zero the PTEs. Shared pages (PTE_SHR) and
// pages without user access stay in memory.
// Caller holds ptable.lock; it is released around every blocking file
// operation (open/write/close) and re-acquired afterwards.
// NOTE(review): skipped pages are not accounted for in the file offset
// (filewrite appends sequentially) — confirm swapIn's sequential reads
// line up with this layout.
void swapOut(struct proc* p) {
  //write to file
  char filename[9];
  struct file* f;
  uint i;
  pte_t* pte=0;

  getSwapFileName(p, filename);
  // Drop ptable.lock around the blocking file open.
  release(&ptable.lock);
  f = openKernelFile(filename, O_CREATE | O_WRONLY);
  acquire(&ptable.lock);
  if(f == 0)
    panic("swapout: file open error\n");
  int freed = 0;  // count of frames written out and released
  for (i = 0; i < p->sz; i += PGSIZE) {
    if (!(pte = walkpgdir(p->pgdir, (void *) i, 0)))
      panic("swapout: pte should exist\n");
    if (!(*pte & PTE_P))
      panic("swapout: page not present\n");
    // Skip shared pages and pages the user cannot access.
    if((*pte & PTE_SHR) || !(*pte & PTE_U))
      continue;
    char *addr=(char*)p2v(PTE_ADDR(*pte));
    // Drop ptable.lock around the blocking file write.
    release(&ptable.lock);
    filewrite(f, addr, PGSIZE);
    acquire(&ptable.lock);
    kfree(addr);
    *pte = 0;  // page is now on disk only
    freed++;
  }
  // Drop ptable.lock around the blocking file close.
  release(&ptable.lock);
  fileclose(f);
  acquire(&ptable.lock);
}
// Clear PTE_U on a page. Used to create an inaccessible page beneath // the user stack (to trap stack underflow). void clearpteu (pgd_t *pgdir, char *uva) { pte_t *pte; pte = walkpgdir(pgdir, uva, 0); if (pte == 0) { panic("clearpteu"); } // in ARM, we change the AP field (ap & 0x3) << 4) *pte = (*pte & ~(0x03 << 6)) | AP_RW_1; }
void* shmem_access(int page_number) { if(page_number<0||page_number>3) {return NULL;} //get physical address of the shared page void* sharedPhyAddr=shmem_addr[page_number]; if(proc->bitmap[page_number]!=-1) { return (void*)(proc->bitmap[page_number]); } int i=1;//loops from 1 to 4 and finds the page in AS. if 1 then last page is available , if 2 then second last is available and so on. search till the present is -pde&PTE_P is 1. void* VA; pde_t *pde; //cprintf("before shmem access in pid %d, shared pages = %d\n", proc->pid,proc->procShmemCount); for(i=1;i<=4;i++) { VA=(void*)(USERTOP-4096*i); pde=walkpgdir(proc->pgdir,VA,0); //cprintf("i=%d\n",i); if(~*pde&PTE_P) { // cprintf("page found number=%d\n",i); //page found break; } } if(i==5) //no page is found return error { return NULL; } else { if(proc->bitmap[page_number]==-1) { //mapping to the physical address of shared page mappages(proc->pgdir,(void*)(USERTOP-4096*i),(uint)PGSIZE,(uint)sharedPhyAddr,PTE_W|PTE_U); //updating shared pages only if the mapping is successfull proc->procShmemCount++; inc_shmem_proc_count(page_number); //shmem_proc_count[page_number]++; proc->bitmap[page_number]=USERTOP-4096*i; } //cprintf("after shmem access in pid %d, shared pages = %d\n", proc->pid,proc->procShmemCount); //cprintf("address=%p\n",returnAddress,(void*)returnAddress); return (void*)(proc->bitmap[page_number]); } return NULL; }
void swapIn(struct proc* p) { //read from file char filename[9]; struct file* f; uint i; char* buff; pte_t* pte; //cprintf("swapin %s %d\n", p->name, p->pid); getSwapFileName(p, filename); release(&ptable.lock); f = openKernelFile(filename, O_RDWR); //cprintf("1"); acquire(&ptable.lock); if(f == 0) panic("swapin: file open error\n"); f->off = 0; //p->pgdir = setupkvm(); if (!p->pgdir) panic("swapin: setupkvm failed\n"); int recovered = 0; for (i = 0; i < p->sz; i += PGSIZE) { if((pte = walkpgdir(p->pgdir, (char*) i, 0)) == 0){ //cprintf("skip"); //continue; } if(*pte != 0) continue; if (!(buff = kalloc())) panic("swapin: kalloc failed\n"); release(&ptable.lock); fileread(f, buff, PGSIZE); acquire(&ptable.lock); //cprintf("(%s)", buff); if (mappages(p->pgdir, (void*) i, PGSIZE, v2p(buff), PTE_W | PTE_U) < 0) panic("swapin: mappages failed\n"); recovered++; } //cprintf("swapin recovered %d\n", recovered); release(&ptable.lock); fileclose(f); //cprintf("swapin2"); //unlinkKernelFile(filename); acquire(&ptable.lock); //cprintf("swapin3"); }
void nfu_update(void) { struct pg* iter = proc->pg_data.pgs; pde_t* pte; while(iter < END) { if(iter->state != PG_UNUSED) { iter->nfu_time >>= 1; // ageing - shift it pte = walkpgdir(proc->pgdir, (void*) iter->id, 0); if(*pte && *pte & PTE_A) { // was used so turn the bit one iter->nfu_time |= 0x8000; *pte = *pte & ~PTE_A; } } ++iter; }
// Deallocate user pages to bring the process size from oldsz to // newsz. oldsz and newsz need not be page-aligned, nor does newsz // need to be less than oldsz. oldsz can be larger than the actual // process size. Returns the new process size. int deallocuvm(pde_t *pgdir, uint oldsz, uint newsz) { char *a = (char *)PGROUNDUP(newsz); char *last = PGROUNDDOWN(oldsz - 1); for(; a <= last; a += PGSIZE){ pte_t *pte = walkpgdir(pgdir, a, 0); if(pte && (*pte & PTE_P) != 0){ uint pa = PTE_ADDR(*pte); if(pa == 0) panic("kfree"); kfree((void *) pa); *pte = 0; } } return newsz < oldsz ? newsz : oldsz; }
void do_munprotect(struct proc *p, void *addr, int len){ uint vpn; uint ad = (uint) addr; int size = ad + (len*PGSIZE)-1; for (vpn = ad; vpn < size; vpn += PGSIZE) { pte_t *pte; pde_t *pde = p->pgdir; if ((pte = walkpgdir(pde, (void*)vpn, 0)) == 0) { cprintf("VPN %x is not mapped\n", vpn); } else { if (pte!=0 && *pte&PTE_U && *pte&PTE_P){ *pte= *pte | PTE_W; } } } lcr3(v2p(proc->pgdir)); }
// Print the virtual-to-physical mappings and permissions for every page
// in the page-aligned range covering [a, b]. Returns 0; panics if a > b.
int showmp(uint a, uint b) {
  if (a > b) {
    panic("show mapping");
  }
  char *va = (char*)PGROUNDDOWN(a);
  char *vb = (char*)PGROUNDDOWN(b);
  cprintf("Mappings of virtual address %x to %x:\n", va, vb);
  cprintf("Virt Addr\tPhys Addr\tPermission\n");
  pte_t *pte;
  for (;;) {
    // BUG FIX: walkpgdir() takes three arguments; the alloc flag
    // (0 = do not create missing page tables) was missing, which does
    // not compile against the walkpgdir used elsewhere in this file.
    pte = walkpgdir(cpu->proc->pgdir, va, 0);
    if (pte == 0 || *pte == 0) {
      // No mapping for this page.
      cprintf("%x\t\t-\n", va);
    } else {
      uint flags = PTE_FLAGS(*pte);
      cprintf("%x\t\t%x\t\t", va, PTE_ADDR(*pte));
      if (flags & PTE_U) {
        cprintf("User\t");
      } else
        cprintf("-\t");
      cprintf("/ ");
      if (flags & PTE_W) {
        cprintf("Writeable");
      } else
        cprintf("-");
      cprintf(" %d\n", flags & PTE_D);
    }
    if (va == vb)
      break;
    va += PGSIZE;
  }
  return 0;
}
//PAGEBREAK! // Map user virtual address to kernel address. char* uva2ka (pgd_t *pgdir, char *uva) { pte_t *pte; pte = walkpgdir(pgdir, uva, 0); // make sure it exists if ((*pte & (ENTRY_PAGE | ENTRY_VALID)) == 0) { return 0; } // make sure it is a user page if (PTE_AP(*pte) != AP_RW_1_0) { return 0; } return (char*) p2v(PTE_ADDR(*pte)); }
// Restore write permission on all pages from addr up to the top of
// process p's address space, flushing the TLB once if anything changed.
void do_munprotect(struct proc *p, void *addr) {
  uint i;
  int changed = 0;

  for(i = (uint)addr; i < p->sz; i += PGSIZE) {
    pte_t *pte;
    if((pte = walkpgdir(p->pgdir, (void*)i, 0)) == 0) {
      cprintf("not mapped\n");
    } else if(!((*pte) & PTE_W)) {
      *pte = *pte | PTE_W;
      changed = 1;
    }
  }
  // PERF/CONSISTENCY FIX: the old code reloaded cr3 (a full TLB flush)
  // inside the loop for every single modified page, and it flushed the
  // *current* process's page table (`proc`) rather than p's. Flush once
  // at the end, and only if a PTE actually changed.
  if(changed)
    lcr3(v2p(p->pgdir));
}
// Load a program segment into pgdir. addr must be page-aligned // and the pages from addr to addr+sz must already be mapped. int loaduvm(pde_t *pgdir, char *addr, struct inode *ip, uint offset, uint sz) { uint i, pa, n; pte_t *pte; if ((uint) addr % PGSIZE != 0) panic("loaduvm: addr must be page aligned"); for (i = 0; i < sz; i += PGSIZE) { if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) panic("loaduvm: address should exist"); pa = PTE_ADDR(*pte); if (sz - i < PGSIZE) n = sz - i; else n = PGSIZE; if (readi(ip, p2v(pa), offset + i, n) != n) return -1; } return 0; }
// Evict one page from pgdir to the current process's swap file.
// Picks a victim via nextPageSwap(), moves its bookkeeping from the
// in-memory tables to the swap tables, writes the page to the swap file,
// marks the PTE swapped-out (PTE_PG set, PTE_P cleared), frees the frame
// and flushes the TLB.
// Returns the page-rounded victim VA on success.
// NOTE(review): the failure path returns -1 from a uint-returning
// function (i.e. 0xFFFFFFFF) — confirm callers compare against that.
uint swapOut(pde_t *pgdir) {
  uint va_page;
  pde_t *pte;
  char* psyc_page;

  va_page = nextPageSwap(pgdir);
  // The init and shell processes are never swapped out.
  if (!isNotInitShell(proc) || va_page == UNUSED_VA) {
    return -1;
  }
  if ((proc->swapData).nSwappedPages >= MAX_SWAP_PAGES) {
    panic("Trying to use more than 30 pages!!!");
  }
  // add page address to swap list in proc, copy the page to swap file
  // and inc the num of swapped pages
  va_page = PGROUNDDOWN(va_page);
  int j = removeMemAddr(va_page);   // slot freed in the in-memory table
  int i = addSwapAddr(va_page);     // slot taken in the swap table
#ifdef NFU
  // Move the NFU age counter from the in-memory slot to the swap slot.
  (proc->swapData).nfu[i + MAX_PSYC_PAGES] = (proc->swapData).nfu[j];
  (proc->swapData).nfu[j] = 0;
#endif
#if defined(FIFO) || defined(SCFIFO)
  // Same migration for the FIFO/SCFIFO creation timestamps.
  (proc->swapData).creationTime[i + MAX_PSYC_PAGES] = (proc->swapData).creationTime[j];
  (proc->swapData).creationTime[j] = -1;
#endif
  writeToSwapFile(proc, (char *) va_page, i * PGSIZE, PGSIZE);
  // get address of va_page in page table pgdir, update flags and free it
  if ((pte = walkpgdir(pgdir, (char *) va_page, 0)) == 0)
    panic("swapOut: Page table not found!");
  *pte &= ~PTE_P;  // change flag to not present
  *pte |= PTE_PG;  // change flag to swapped out
  // PTE_ADDR masks off the flag bits, so reading the frame address after
  // the flag updates above is still correct.
  psyc_page = p2v(PTE_ADDR(*pte));
  kfree(psyc_page);
  lcr3(PTE_ADDR(v2p(pgdir)));  // reload cr3 to flush stale TLB entries
  return va_page;
}
// Create PTEs for virtual addresses starting at va that refer to // physical addresses starting at pa. va and size might not // be page-aligned. static int mappages(pde_t *pgdir, void *va, uint size, uint pa, int perm) { char *a, *last; pte_t *pte; a = (char*)PGROUNDDOWN((uint)va); last = (char*)PGROUNDDOWN(((uint)va) + size - 1); for(;;){ if((pte = walkpgdir(pgdir, a, 1)) == 0) return -1; if(*pte & PTE_P) panic("remap"); *pte = pa | perm | PTE_P; if(a == last) break; a += PGSIZE; pa += PGSIZE; } return 0; }