// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
int growproc(int n)
{
  uint sz;

  sz = proc->sz;
  if(n > 0){
    // Prevent the heap from growing into the stack's guard page.
    int se = proc->se;
    int page_n = PGROUNDUP(n);
    int heap_size = PGROUNDUP(proc->sz);
    if((heap_size + page_n) > (se - PGSIZE)){
      //panic("Heap is overwriting stack!");
      // Just returning -1 is sufficient; no need to panic.
      return -1;
    }
    if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0)
      return -1;
  } else if(n < 0){
    if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0)
      return -1;
  }
  proc->sz = sz;
  switchuvm(proc);
  return 0;
}
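/*
 * Every snippet in this collection leans on xv6-style page-rounding macros.
 * A minimal sketch of the usual definitions (as in xv6's mmu.h; they assume
 * PGSIZE is a power of two):
 */
#define PGSIZE          4096
#define PGROUNDUP(sz)   (((sz)+PGSIZE-1) & ~(PGSIZE-1))
#define PGROUNDDOWN(a)  (((a)) & ~(PGSIZE-1))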
int map_section(int k, int fd, SCNHDR *shdr, int envid)
{
  u_int page_count;
  Pte *ptes;
  int i, retval = 0, type;
  off_t curloc = lseek(fd, 0, SEEK_CUR);
  u32 start, zero_start, len;

  if (!strcmp(shdr->s_name, ".text"))
    type = MS_TEXT;
  else if (!strcmp(shdr->s_name, ".data"))
    type = MS_DATA;
  else if (!strcmp(shdr->s_name, ".bss"))
    type = MS_BSS;
  else {
    type = MS_UNKNOWN;
    return 0;          /* unknown section types are silently skipped */
  }

  page_count = PGNO(PGROUNDUP(shdr->s_size));
  if (type == MS_BSS) {
    /* .bss starts wherever .data ended; only map the whole pages above it */
    start = PGROUNDUP(shdr->s_vaddr);
    if (start != shdr->s_vaddr)
      page_count--;
  } else
    start = shdr->s_vaddr;

  if ((ptes = malloc(sizeof(Pte) * page_count)) == 0) {
    return -1;
  }
  for (i = 0; i < page_count; i++)
    ptes[i] = PG_U|PG_W|PG_P;

  /* Map the pages both at TEMP_REGION in our own address space and at
     `start` in the target environment, then copy the section in from
     the file (nothing to read for .bss). */
  if (sys_self_insert_pte_range(k, ptes, page_count, TEMP_REGION) < 0 ||
      sys_insert_pte_range(k, &vpt[PGNO(TEMP_REGION)], page_count, start,
                           k, envid) < 0 ||
      (type != MS_BSS &&
       (lseek(fd, shdr->s_scnptr, SEEK_SET) != shdr->s_scnptr ||
        read(fd, (void*)TEMP_REGION, shdr->s_size) != shdr->s_size ||
        lseek(fd, curloc, SEEK_SET) != curloc))) {
    retval = -1;
  }

  /* Zero .bss entirely; for other sections zero the tail of the last page */
  if (type == MS_BSS) {
    zero_start = TEMP_REGION;
    len = page_count * NBPG;
  } else {
    zero_start = TEMP_REGION + shdr->s_size;
    len = NBPG - (zero_start & PGMASK);
  }
  bzero((void*)zero_start, len);

  if (type == MS_TEXT)
    mprotect((void*)TEMP_REGION, page_count*NBPG, PROT_READ);

  /* Drop our temporary mappings; on failure also undo the target's */
  for (i = 0; i < page_count; i++)
    ptes[i] = 0;
  sys_self_insert_pte_range(k, ptes, page_count, TEMP_REGION);
  if (retval == -1)
    sys_insert_pte_range(k, ptes, page_count, start, k, envid);

  return retval;
}
void kmem_t::init_range(void* vstart, void* vend)
{
  // Round both endpoints up to page boundaries, then hand every page in
  // the range to the free list.  (Use char* locals: arithmetic on void*
  // is not valid C++.)
  char* start = (char*)PGROUNDUP((uint)vstart);
  char* end   = (char*)PGROUNDUP((uint)vend);

  for(int i = MAP_NR(start); start < end; i++, start += PGSIZE){
    page_t* page = pages + i;
    page->vaddr = (void*)start;
    free_page(page);
  }
}
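/*
 * MAP_NR is not defined in the snippet above; it is assumed to turn a
 * virtual address into an index into the global pages[] array, Linux-style
 * (Linux additionally subtracts the kernel's virtual base first).  A sketch
 * under that assumption; this definition is hypothetical, not from the
 * original source:
 */
#define MAP_NR(addr)  (((uint)(addr)) / PGSIZE)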
/* Find an empty sector and bring it into use.  If there isn't one,
   try and allocate one.  If that fails, return -1. */
static Int maybe_commission_sector ( void )
{
   Char msg[100];
   Int  s;

   for (s = 0; s < VG_TC_N_SECTORS; s++) {
      if (vg_tc[s] != NULL && vg_tc_used[s] == 0) {
         vg_tc_age[s] = overall_in_count;
         VG_(sprintf)(msg, "after commission of sector %d "
                           "at time %d", s, vg_tc_age[s]);
         pp_tt_tc_status ( msg );
#        ifdef DEBUG_TRANSTAB
         VG_(sanity_check_tt_tc)();
#        endif
         return s;
      }
   }

   for (s = 0; s < VG_TC_N_SECTORS; s++) {
      if (vg_tc[s] == NULL) {
#if 1
         vg_tc[s] = VG_(get_memory_from_mmap)
                       ( vg_tc_sector_szB, "trans-cache(sector)" );
#else
         // Alternative: put translations in an mmap'd file.  The main
         // reason is to help OProfile -- OProfile can assign time spent in
         // translations to a particular file.  The file format doesn't
         // really matter, which is good because it's not really readable,
         // being generated code but not a proper ELF file.
         Char buf[20];
         static Int count = 0;
         Int fd;

         VG_(sprintf)(buf, ".transtab.%d", count++);
         fd = VG_(open)(buf, VKI_O_RDWR|VKI_O_CREAT|VKI_O_TRUNC, 0700);
         //VG_(unlink)(buf);
         VG_(do_syscall)(__NR_ftruncate, fd, PGROUNDUP(vg_tc_sector_szB));
         vg_tc[s] = VG_(mmap)(0, PGROUNDUP(vg_tc_sector_szB),
                              VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
                              VKI_MAP_SHARED, 0, fd, 0);
         VG_(close)(fd);
#endif
         vg_tc_used[s] = 0;
         VG_(sprintf)(msg, "after allocation of sector %d (size %d)",
                           s, vg_tc_sector_szB );
         pp_tt_tc_status ( msg );
         /* The new sector is now empty-and-allocated; recurse to commission it. */
         return maybe_commission_sector();
      }
   }

   return -1;
}
static int __zero_segment (int envid, u_int start, u_int sz)
{
  u_int temp_pages;

  assert (!(start & PGMASK));
  temp_pages = (u_int)__malloc(PGROUNDUP(sz));
  if (temp_pages == 0)
    return -1;

  /* alloc pages for this segment and map into our address space writeable */
  if (__vm_alloc_region (temp_pages, sz, 0, PG_P|PG_U|PG_W) < 0) {
    __free((void*)temp_pages);
    return -1;
  }

  /* and map them into the other address space */
  if (__vm_share_region (temp_pages, sz, 0, 0, envid, start) < 0) {
    __free((void*)temp_pages);
    return -1;
  }

  /* zero the pages */
  bzero ((void *)temp_pages, sz);

  /* and remove our mapping of the pages */
  if (__vm_free_region (temp_pages, sz, 0) < 0) {
    __free((void*)temp_pages);
    return -1;
  }

  __free((void*)temp_pages);
  return 0;
}
// Deallocate user pages to bring the process size from oldsz to
// newsz.  oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz.  oldsz can be larger than the actual
// process size.  Returns the new process size.
int deallocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  pte_t *pte;
  uint a, pa;

  if(newsz >= oldsz)
    return oldsz;

  a = PGROUNDUP(newsz);
  int isShmem;  // added by Ying
  for(; a < oldsz; a += PGSIZE){
    isShmem = 0;  // added by Ying
    pte = walkpgdir(pgdir, (char*)a, 0);
    if(pte && (*pte & PTE_P) != 0){
      pa = PTE_ADDR(*pte);
      // added by Ying: skip shared-memory frames, which must not be freed
      int i;
      for (i = 0; i < 4; i++) {
        if (pa == (unsigned int)shmem_addr[i])
          isShmem = 1;
      }
      if (isShmem)
        continue;
      if(pa == 0)
        panic("kfree");
      kfree((char*)pa);
      *pte = 0;
    }
  }
  return newsz;
}
// Deallocate user pages to bring the process size from oldsz to
// newsz.  oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz.  oldsz can be larger than the actual
// process size.  Returns the new process size.
int deallocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  pte_t *pte;
  uint a, pa;

  if(newsz >= oldsz)
    return oldsz;

  a = PGROUNDUP(newsz);
  for(; a < oldsz; a += PGSIZE){
    pte = walkpgdir(pgdir, (char*)a, 0);
    if(!pte)
      // No page table here: skip over the rest of this page-directory span.
      a += (NPTENTRIES - 1) * PGSIZE;
    else if((*pte & PTE_P) != 0){
      pa = PTE_ADDR(*pte);
      if(pa == 0)
        panic("kfree");
      char *v = p2v(pa);
      kfree(v);
      *pte = 0;
    }
  }
  return newsz;
}
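/*
 * The (NPTENTRIES - 1) * PGSIZE skip above advances `a` by one full
 * page-directory span (4 MB with NPTENTRIES = 1024), but it is only exact
 * when `a` is already aligned to that span.  Later xv6 revisions compute
 * the next boundary explicitly with PGADDR/PDX from mmu.h; a minimal
 * sketch of that variant:
 */
static uint skip_to_next_pde(uint a)
{
  // First address mapped by the next page-directory entry, minus one page
  // because the caller's loop increment will add PGSIZE back.
  return PGADDR(PDX(a) + 1, 0, 0) - PGSIZE;
}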
// Deallocate user pages to bring the process size from oldsz to
// newsz.  oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz.  oldsz can be larger than the actual
// process size.  Returns the new process size.
int deallocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  pte_t *pte;
  uint a, pa;

  if(newsz >= oldsz)
    return oldsz;

  a = PGROUNDUP(newsz);
  for(; a < oldsz; a += PGSIZE){
    pte = walkpgdir(pgdir, (char*)a, 0);
    if(!pte)
      a += (NPTENTRIES - 1) * PGSIZE;
    else if((*pte & PTE_P) != 0){
      pa = PTE_ADDR(*pte);
      if(pa == 0)
        panic("kfree");
      // Drop our reference; free the frame only when the last one is gone.
      acquire(&r_c.lock);
      r_c.ref_count[pa / PGSIZE]--;
      if(r_c.ref_count[pa / PGSIZE] == 0) {
        char *v = p2v(pa);
        kfree(v);
      }
      release(&r_c.lock);
      *pte = 0;
    }
  }
  return newsz;
}
// Allocate page tables and physical memory to grow process from oldsz to
// newsz, which need not be page aligned.  Returns new size or 0 on error.
int allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  char *mem;
  uint a;

  if(newsz >= KERNBASE)
    return 0;
  if(newsz < oldsz)
    return oldsz;

  // debug: trace the requested growth
  cprintf("%d \n", oldsz);
  cprintf("%d \n", newsz);

  a = PGROUNDUP(oldsz);
  for(; a < newsz; a += PGSIZE){
    mem = kalloc();
    if(mem == 0){
      cprintf("allocuvm out of memory\n");
      deallocuvm(pgdir, newsz, oldsz);
      return 0;
    }
    memset(mem, 0, PGSIZE);
    mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U);
  }
  return newsz;
}
int register_pagefault_handler (uint vastart, int len, int (*handler)(uint,int))
{
  handler_t *tmp = handlers;
  uint vaend = PGROUNDUP (vastart + len);

  vastart = PGROUNDDOWN (vastart);

  /* Reject a registration that overlaps any existing handler's range. */
  while (tmp) {
    if ((vastart >= tmp->vastart) && (vastart < tmp->vaend)) {
      return (-1);
    }
    if ((vaend >= tmp->vastart) && (vaend < tmp->vaend)) {
      return (-1);
    }
    if ((vastart < tmp->vastart) && (vaend >= tmp->vaend)) {
      return (-1);
    }
    tmp = tmp->next;
  }

  tmp = (handler_t *) __malloc (sizeof(handler_t));
  assert(tmp);
  tmp->vastart = vastart;
  tmp->vaend = vaend;
  tmp->handler = handler;
  tmp->next = handlers;
  handlers = tmp;

  /* kprintf ("(%d) pagefault_handler registered for %x -- %x\n",
              geteid(), tmp->vastart, tmp->vaend); */
  return (0);
}
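/*
 * Usage sketch (hypothetical): registering a demand-zero handler for a
 * 16-page region.  The names demand_zero and region_base, and the handler's
 * return convention, are illustrative assumptions, not from the original.
 */
static int demand_zero(uint va, int errcode)
{
  /* back PGROUNDDOWN(va) with a fresh zeroed page here ... */
  return 1;  /* assumed: nonzero means the fault was handled */
}

/* ... at setup time:
 *   register_pagefault_handler(region_base, 16 * NBPG, demand_zero);
 */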
// Allocate page tables and physical memory to grow process from oldsz to
// newsz, which need not be page aligned.  Returns new size or 0 on error.
int allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  char *mem;
  uint a;

  if(newsz > USERTOP)
    return 0;
  if(newsz < oldsz)
    return oldsz;

  a = PGROUNDUP(oldsz);
  for(; a < newsz; a += PGSIZE){
    mem = kalloc();
    if(mem == 0){
      cprintf("proc %d: allocuvm out of memory\n", proc->pid);
      cprintf("proc %d: newsz: %d\n", proc->pid, newsz);
      cprintf("proc %d: oldsz: %d\n", proc->pid, oldsz);
      deallocuvm(pgdir, newsz, oldsz);
      return 0;
    }
    memset(mem, 0, PGSIZE);
    mappages(pgdir, (char*)a, PGSIZE, PADDR(mem), PTE_W|PTE_U);
  }
  return newsz;
}
void freerange(void *vstart, void *vend)
{
  char *p;

  p = (char*)PGROUNDUP((uint)vstart);
  for(; p + PGSIZE <= (char*)vend; p += PGSIZE)
    kfree(p);
}
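/*
 * In stock xv6, freerange is driven in two stages while the kernel boots:
 * kinit1 runs lock-free before the other CPUs start, kinit2 afterwards.
 * A sketch of the first-stage caller from kalloc.c:
 */
void kinit1(void *vstart, void *vend)
{
  initlock(&kmem.lock, "kmem");
  kmem.use_lock = 0;   // single CPU so far; kfree skips locking
  freerange(vstart, vend);
}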
// Allocate page tables and physical memory to grow process from oldsz to
// newsz, which need not be page aligned.  Returns new size or 0 on error.
int allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  char *mem;
  uint a;

  if(newsz >= KERNBASE)
    return 0;
  if(newsz < oldsz)
    return oldsz;

  a = PGROUNDUP(oldsz);
  for(; a < newsz; a += PGSIZE){
    mem = kalloc();
    if(mem == 0){
      cprintf("allocuvm out of memory\n");
      deallocuvm(pgdir, newsz, oldsz);
      return 0;
    }
    memset(mem, 0, PGSIZE);
    if (mappages(pgdir, (char*)a, PGSIZE, V2P(mem), PTE_W|PTE_U) < 0)
      panic("allocuvm: cannot create pagetable");
  }
  return newsz;
}
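/*
 * Panicking on a mappages failure takes the whole kernel down for what is
 * really a per-process allocation failure.  Stock xv6 unwinds instead; a
 * sketch of that error path, in place of the panic above:
 */
if(mappages(pgdir, (char*)a, PGSIZE, V2P(mem), PTE_W|PTE_U) < 0){
  cprintf("allocuvm out of memory (2)\n");
  deallocuvm(pgdir, newsz, oldsz);
  kfree(mem);   // this page was never mapped, so free it explicitly
  return 0;
}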
// Allocate page tables and physical memory to grow process from oldsz to
// newsz, which need not be page aligned.  Returns new size or 0 on error.
int allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  char *mem;
  uint a;

  if(newsz > USERTOP)
    return 0;
  if(newsz < oldsz)
    return oldsz;

  a = PGROUNDUP(oldsz);
  for(; a < newsz; a += PGSIZE){
    mem = kalloc();
    if(mem == 0){
      cprintf("allocuvm out of memory\n");
      deallocuvm(pgdir, newsz, oldsz);
      return 0;
    }
    //cprintf("alloc a new page starting at: %x\n", mem);
    memset(mem, 0, PGSIZE);
    mappages(pgdir, (char*)a, PGSIZE, PADDR(mem), PTE_W|PTE_U);
  }
  return newsz;
}
/* set new protection flags on an address range */
void VG_(mprotect_range)(Addr a, UInt len, UInt prot)
{
   Segment *s, *next;
   static const Bool debug = False || mem_debug;

   if (debug)
      VG_(printf)("mprotect_range(%p, %d, %x)\n", a, len, prot);

   /* Everything must be page-aligned */
   vg_assert((a & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   VG_(split_segment)(a);
   VG_(split_segment)(a+len);

   for(s = VG_(SkipList_Find)(&sk_segments, &a);
       s != NULL && s->addr < a+len;
       s = next)
   {
      next = VG_(SkipNode_Next)(&sk_segments, s);
      if (s->addr < a)
         continue;

      s->prot = prot;
   }

   merge_segments(a, len);
}
/* int munmap(void* addr, size_t length) */
int sys_munmap(void)
{
  void* addr;
  size_t length;

  if(syscall_get_int((int*)&addr, 0))
    return -1;
  if(syscall_get_int((int*)&length, 1))
    return -1;

  /* Is the address okay? */
  uintptr_t address = (uintptr_t)addr;
  if(length == 0)
    return -1;

  /* Address must be page aligned */
  if(address != PGROUNDDOWN(address))
    return -1;

  /* Address must be in the proper range */
  if(address < PGROUNDUP(rproc->heap_end) + PGSIZE
      || address >= PGROUNDDOWN(rproc->mmap_start)
      || address + length > PGROUNDDOWN(rproc->mmap_start))
    return -1;

  uintptr_t x;
  for(x = 0; x < length; x += PGSIZE)
    vm_unmappage(x + address, rproc->pgdir);

  return 0;
}
// Allocate page tables and physical memory to grow process from oldsz to
// newsz, which need not be page aligned.  Returns new size or 0 on error.
int allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  char *mem;
  uint a;

  if(newsz > USERTOP)
    return 0;
  if(newsz < oldsz)
    return oldsz;

  a = PGROUNDUP(oldsz);
  for(; a < newsz; a += PGSIZE){
    mem = kalloc();
    if(mem == 0){
      cprintf("allocuvm out of memory\n");
      deallocuvm(pgdir, newsz, oldsz);
      return 0;
    }
    // Clear the page, since it might hold sensitive leftover data.
    memset(mem, 0, PGSIZE);
    mappages(pgdir, (char*)a, PGSIZE, PADDR(mem), PTE_W|PTE_U);
  }
  return newsz;
}
void cman_init(void)
{
  /* Zero our space */
  size_t sz = PGROUNDUP(KVM_DISK_E - KVM_DISK_S);
  memset((void*)KVM_DISK_S, 0, sz);

  head = (void*)KVM_DISK_S;
  head->sz = sz;
}
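/*
 * The cman_node type is not shown in these snippets; its layout is inferred
 * from the uses of ->sz and ->next here and in cman_alloc below.  A sketch
 * under that assumption (not the original definition):
 */
struct cman_node {
  size_t sz;               /* length of this free span, a page multiple */
  struct cman_node *next;  /* next free span, or NULL */
};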
int main(int argc, char *argv[])
{
  ppid = getpid();

  // Page-align the break before we start.
  char *brk = sbrk(0);
  sbrk(PGROUNDUP((uint)brk) - (uint)brk);
  char *start = sbrk(0);

  // should fail for address zero, which shouldn't be mapped in
  // the process any more because of part a of project
  assert(mprotect(0, 1) == -1);
  assert(munprotect(0, 1) == -1);

  printf(1, "starting address is %d\n", (uint)start);
  assert(mprotect(start, 1) == -1);
  assert(munprotect(start, 1) == -1);

  sbrk(PGSIZE * 1);
  assert(mprotect(start, 2) == -1);
  assert(munprotect(start, 2) == -1);
  assert(mprotect(start + 1, 1) == -1);
  assert(munprotect(start + 1, 1) == -1);
  assert(mprotect(start, 0) == -1);
  assert(munprotect(start, 0) == -1);
  assert(mprotect(start, -2) == -1);
  assert(munprotect(start, -2) == -1);
  assert(mprotect(start, 1) == 0);
  assert(munprotect(start, 1) == 0);

  // protect page again to check that permissions
  // carry over on fork
  assert(mprotect(start, 1) == 0);

  int rv = fork();
  if (rv < 0) {
    printf(1, "Fork failed. Oops. This shouldn't happen, right?!\n");
  } else if (rv == 0) {
    printf(1, "Attempting to write to protected memory in a child process\n");
    printf(1, "This should cause the child to die if the test succeeds\n");
    *start = 55; // this should cause the child proc to die
    printf(1, "TEST FAILED (if you got here, child didn't crash)\n");
    exit();
  } else {
    assert(munprotect(start, 1) == 0);
    wait();
  }

  printf(1, "TEST PASSED\n");
  exit();
}
void freerange(void *vstart, void *vend)
{
  char *p;

  p = (char*)PGROUNDUP((uint)vstart);
  cprintf("p = %x vstart = %x vend = %x\n", p, vstart, vend);
  for(; p + PGSIZE <= (char*)vend; p += PGSIZE)
    kfree(p);
}
Addr VG_(find_map_space)(Addr addr, UInt len, Bool for_client)
{
   static const Bool debug = False || mem_debug;
   Segment *s;
   Addr ret;
   Addr limit = (for_client ? VG_(client_end) : VG_(valgrind_mmap_end));

   if (addr == 0)
      addr = for_client ? VG_(client_mapbase) : VG_(valgrind_base);
   else {
      /* leave space for redzone and still try to get the exact
         address asked for */
      addr -= VKI_BYTES_PER_PAGE;
   }
   ret = addr;

   /* Everything must be page-aligned */
   vg_assert((addr & (VKI_BYTES_PER_PAGE-1)) == 0);
   len = PGROUNDUP(len);

   len += VKI_BYTES_PER_PAGE * 2; /* leave redzone gaps before and after mapping */

   if (debug)
      VG_(printf)("find_map_space: ret starts as %p-%p client=%d\n",
                  ret, ret+len, for_client);

   for(s = VG_(SkipList_Find)(&sk_segments, &ret);
       s != NULL && s->addr < (ret+len);
       s = VG_(SkipNode_Next)(&sk_segments, s))
   {
      if (debug)
         VG_(printf)("s->addr=%p len=%d (%p) ret=%p\n",
                     s->addr, s->len, s->addr+s->len, ret);

      if (s->addr < (ret + len) && (s->addr + s->len) > ret)
         ret = s->addr+s->len;
   }

   if (debug) {
      if (s)
         VG_(printf)("  s->addr=%p ->len=%d\n", s->addr, s->len);
      else
         VG_(printf)("  s == NULL\n");
   }

   if ((limit - len) < ret)
      ret = 0;                    /* no space */
   else
      ret += VKI_BYTES_PER_PAGE;  /* skip leading redzone */

   if (debug)
      VG_(printf)("find_map_space(%p, %d, %d) -> %p\n",
                  addr, len, for_client, ret);

   return ret;
}
// Initialize free list of physical pages.
void kinit(void)
{
  char *p;

  initlock(&kmem.lock, "kmem");
  p = (char*)PGROUNDUP((uint)newend);
  for(; p + PGSIZE <= (char*)p2v(PHYSTOP); p += PGSIZE)
    kfree(p);
}
// Initialize free list of physical pages.
void kinit(void)
{
  extern char end[];

  initlock(&kmem.lock, "kmem");
  char *p = (char*)PGROUNDUP((uint)end);
  for( ; p + PGSIZE - 1 < (char*)PHYSTOP; p += PGSIZE)
    kfree(p);
}
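/*
 * Both kinit variants hand every page to kfree.  For reference, a sketch of
 * the stock xv6 free-list kfree that the p2v-based variant assumes (struct
 * run is the freelist link embedded in each free page):
 */
void kfree(char *v)
{
  struct run *r;

  if((uint)v % PGSIZE || v < end || v2p(v) >= PHYSTOP)
    panic("kfree");

  memset(v, 1, PGSIZE);   // fill with junk to catch dangling references

  if(kmem.use_lock)
    acquire(&kmem.lock);
  r = (struct run*)v;
  r->next = kmem.freelist;
  kmem.freelist = r;
  if(kmem.use_lock)
    release(&kmem.lock);
}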
void *shmat(int shmid, int shmflg)
{
  char *memory;
  uint size;
  void *ret = (void*)-1;
  int i;
  int flag_for = 0;  /* set once we have mapped at least one page */

  acquire(&shmtable.lock);
  if(shmtable.shmarray[shmid].size > 0){
    size = PGROUNDUP(proc->sz);
    ret = (void*)size;
    if(size + PGSIZE >= KERNBASE){
      cprintf("shmat: not enough memory\n");
      release(&shmtable.lock);
      return (void*)-1;
    }
    // all is fine
    for(i = 0; shmtable.shmarray[shmid].addr[i] && size < KERNBASE; i++){
      // find the address of the memory
      memory = shmtable.shmarray[shmid].addr[i];
      flag_for = 1;  // we did at least one page
      if(shmflg == SHM_RDONLY){
        mappages(proc->pgdir, (char*)size, PGSIZE, v2p(memory), PTE_U);
        size += PGSIZE;
        break;
      }
      else if(shmflg == SHM_RDWR){
        mappages(proc->pgdir, (char*)size, PGSIZE, v2p(memory), PTE_W|PTE_U);
        size += PGSIZE;
        break;
      }
      else{  // unrecognized flag
        flag_for = 0;
        size += PGSIZE;
      }
    }
    if(flag_for){
      proc->sz = size;
      shmtable.shmarray[shmid].linkcounter++;
    }
    else{
      ret = (void*)-1;
      cprintf("shmat: unrecognized shmflg (flag_for == 0)\n");
    }
  }
  else{
    cprintf("shmat: the memory isn't there\n");
    release(&shmtable.lock);
    return (void*)-1;
  }

  shmtable.shmarray[shmid].virtual_addr = ret;
  proc->va[shmid] = ret;
  release(&shmtable.lock);
  return ret;
}
char* enter_alloc(void)
{
  if(newend == 0)
    newend = end;

  void *p = (void*)PGROUNDUP((uint)newend);
  memset(p, 0, PGSIZE);
  newend = newend + PGSIZE;
  return p;
}
void* cman_alloc(size_t sz)
{
  if(sz == 0)
    return NULL;
  sz = PGROUNDUP(sz);

  struct cman_node* curr = head;
  struct cman_node* last = NULL;
  struct cman_node* result = NULL;

  /* Search for a node with enough room */
  while(curr)
  {
    if(curr->sz >= sz)
    {
      result = curr;
      break;
    }

    last = curr;
    curr = curr->next;
  }

  /* Did we find anything? */
  if(!result)
  {
    cprintf("cman: Disk cache out of space!\n");
    return NULL;
  }

  if(sz == result->sz)
  {
    /* Consume the whole node */
    if(last)
    {
      /* Unlink from the middle of the list */
      last->next = result->next;
    } else {
      /* result was the head; update it */
      head = result->next;
    }
  } else {
    /* Partition this node: carve the allocation off its tail so the
       remaining free span and the allocation never overlap */
    curr = result;
    curr->sz -= sz;
    result = (struct cman_node*)(((char*)curr) + curr->sz);
  }

  result->next = NULL;
  return (void*)result;
}
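/*
 * Worked example of the tail carve above (hypothetical numbers): a free node
 * at 0x1000 with sz = 3*PGSIZE serving cman_alloc(PGSIZE) shrinks to
 * sz = 2*PGSIZE and returns 0x1000 + 2*PGSIZE = 0x3000, so the remaining
 * free span [0x1000, 0x3000) and the allocation [0x3000, 0x4000) are disjoint.
 */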
// A simple page allocator to get off the ground during entry
char * enter_alloc(void)
{
  if (newend == 0)
    newend = end;

  if ((uint) newend >= KERNBASE + 0x400000)
    panic("only first 4Mbyte are mapped during entry");

  void *p = (void*)PGROUNDUP((uint)newend);
  memset(p, 0, PGSIZE);
  newend = newend + PGSIZE;
  return p;
}
// Deallocate user pages to bring the process size from oldsz to
// newsz.  oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz.  oldsz can be larger than the actual
// process size.  Returns the new process size.
int deallocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  char *a = (char *)PGROUNDUP(newsz);
  char *last = (char *)PGROUNDDOWN(oldsz - 1);

  for(; a <= last; a += PGSIZE){
    pte_t *pte = walkpgdir(pgdir, a, 0);
    if(pte && (*pte & PTE_P) != 0){
      uint pa = PTE_ADDR(*pte);
      if(pa == 0)
        panic("kfree");
      kfree((void *) pa);
      *pte = 0;
    }
  }
  return newsz < oldsz ? newsz : oldsz;
}
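/*
 * Example of the boundary rounding above (PGSIZE = 4096): with
 * oldsz = 3*PGSIZE + 100 and newsz = PGSIZE + 1,
 *   a    = PGROUNDUP(newsz)       = 2*PGSIZE  (first page wholly above newsz)
 *   last = PGROUNDDOWN(oldsz - 1) = 3*PGSIZE  (last page still in use)
 * so pages 2 and 3 are freed while page 1, still partially in use, is kept.
 */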
Addr VG_(client_alloc)(Addr addr, UInt len, UInt prot, UInt flags)
{
   len = PGROUNDUP(len);

   if (!(flags & SF_FIXED))
      addr = VG_(find_map_space)(addr, len, True);

   flags |= SF_CORE;

   if (VG_(mmap)((void *)addr, len, prot,
                 VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS | VKI_MAP_CLIENT,
                 -1, 0) == (void *)addr) {
      VG_(map_segment)(addr, len, prot, flags);
      return addr;
   }

   return 0;
}
/* Returns 0 on success */
int memmap(void *va, size_t length, int prot, void *pa)
{
  u_int numcompleted = 0, i;
  u_int num_pages = PGNO(PGROUNDUP(length));
  int err;
  Pte *ptes = alloca(num_pages * sizeof(Pte));

  for (i = 0; i < num_pages; i++)
    ptes[i] = ((int)pa + i*NBPG) | prot | PG_GUEST;

  err = sys_insert_pte_range(0, ptes, num_pages, (u_int)va, &numcompleted,
                             0 /* u_int ke FIXME */, vmstate.eid);

  if (err || numcompleted != num_pages)
    return -1;
  else
    return 0;
}