// Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz) { pde_t *d; pte_t *pte; uint pa, i, flags; char *mem; if((d = setupkvm()) == 0) return 0; for(i = 0; i < sz; i += PGSIZE){ if((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) panic("copyuvm: pte should exist"); if(*pte & PTE_MMAP) { // We will copy shared mmaps in copy_mmap. continue; } if(!(*pte & PTE_P)) { panic("copyuvm: page not present"); } pa = PTE_ADDR(*pte); flags = PTE_FLAGS(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)p2v(pa), PGSIZE); if(mappages(d, (void*)i, PGSIZE, v2p(mem), flags) < 0) goto bad; } return d; bad: freevm(d); return 0; }
// Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz) { pde_t *d; pte_t *pte; uint pa, i; char *mem; if((d = setupkvm()) == 0) return 0; for(i = PGSIZE; i < sz; i += PGSIZE){ if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; } return d; bad: freevm(d); return 0; }
// Allocate page tables and physical memory to grow process from oldsz to // newsz, which need not be page aligned. Returns new size or 0 on error. int allocuvm(pde_t *pgdir, uint oldsz, uint newsz) { char *mem; uint a; if(newsz > USERTOP) return 0; if(newsz < oldsz) return oldsz; a = PGROUNDUP(oldsz); for(; a < newsz; a += PGSIZE){ mem = kalloc(); if(mem == 0){ cprintf("allocuvm out of memory\n"); deallocuvm(pgdir, newsz, oldsz); return 0; } memset(mem, 0, PGSIZE); mappages(pgdir, (char*)a, PGSIZE, PADDR(mem), PTE_W|PTE_U); } return newsz; }
// TODO(byan23): Copy the stack at the end of addr space. // Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz) { pde_t *d; pte_t *pte; uint pa, i; char *mem; if((d = setupkvm()) == 0) return 0; // Copy code + heap. if (proc->pid == 1) i = 0; else i = PGSIZE; for(; i < sz; i += PGSIZE){ if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; } //cprintf("before coping stack from pid %d.\n", proc->pid); // TODO(byan23): Copy more stack as it grows. // Copy stack. i = USERTOP - proc->ssz; //i = USERTOP - PGSIZE; if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; return d; bad: freevm(d); return 0; }
// Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz, uint s_sz) { pde_t *d; pte_t *pte; uint pa, i, stack_size; char *mem; if((d = setupkvm()) == 0) return 0; // Start at PGSIZE to make the first page invalid for(i = PGSIZE; i < sz; i += PGSIZE){ if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; } // We need to loop again to copy the stack over stack_size = s_sz; for(i = stack_size; i < USERTOP; i+= PGSIZE){ if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; } return d; bad: freevm(d); return 0; }
// Set up kernel part of a page table. pde_t* setupkvm(void) { pde_t *pgdir; // Allocate page directory if(!(pgdir = (pde_t *) kalloc())) return 0; memset(pgdir, 0, PGSIZE); if(// Map IO space from 640K to 1Mbyte !mappages(pgdir, (void *)USERTOP, 0x60000, USERTOP, PTE_W) || // Map kernel and free memory pool !mappages(pgdir, (void *)0x100000, PHYSTOP-0x100000, 0x100000, PTE_W) || // Map devices such as ioapic, lapic, ... !mappages(pgdir, (void *)0xFE000000, 0x2000000, 0xFE000000, PTE_W)) return 0; return pgdir; }
// Load the initcode into address 0 of pgdir. // sz must be less than a page. void inituvm(pde_t *pgdir, char *init, uint sz) { char *mem; if (sz >= PGSIZE) panic("inituvm: more than a page"); mem = kalloc(); memset(mem, 0, PGSIZE); mappages(pgdir, 0, PGSIZE, v2p(mem), PTE_W | PTE_U); memmove(mem, init, sz); }
// Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz, uint stack_addr) { pde_t *d; pte_t *pte; uint pa, i; char *mem; if((d = setupkvm()) == 0) return 0; for(i = PGSIZE; i < sz; i += PGSIZE){ if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) goto bad; } // Copy the last page which is the new stack if((pte = walkpgdir(pgdir, (void*)(USERTOP-PGSIZE), 1)) == 0)//undestand what pgdir does panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); //cprintf(" last page is %d\n",pte); pa = PTE_ADDR(*pte);// what pa are they getting from pte if((mem = kalloc()) == 0) goto bad; memmove(mem, (char*)pa, PGSIZE); // copying some stuff.. figure out what 1 page is and copying it if(mappages(d, (void*)USERTOP-PGSIZE, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0)//mapping into new address space and making it valid goto bad; return d; bad: freevm(d); return 0; }
void* shmem_access(int page_number) { if(page_number<0||page_number>3) {return NULL;} //get physical address of the shared page void* sharedPhyAddr=shmem_addr[page_number]; if(proc->bitmap[page_number]!=-1) { return (void*)(proc->bitmap[page_number]); } int i=1;//loops from 1 to 4 and finds the page in AS. if 1 then last page is available , if 2 then second last is available and so on. search till the present is -pde&PTE_P is 1. void* VA; pde_t *pde; //cprintf("before shmem access in pid %d, shared pages = %d\n", proc->pid,proc->procShmemCount); for(i=1;i<=4;i++) { VA=(void*)(USERTOP-4096*i); pde=walkpgdir(proc->pgdir,VA,0); //cprintf("i=%d\n",i); if(~*pde&PTE_P) { // cprintf("page found number=%d\n",i); //page found break; } } if(i==5) //no page is found return error { return NULL; } else { if(proc->bitmap[page_number]==-1) { //mapping to the physical address of shared page mappages(proc->pgdir,(void*)(USERTOP-4096*i),(uint)PGSIZE,(uint)sharedPhyAddr,PTE_W|PTE_U); //updating shared pages only if the mapping is successfull proc->procShmemCount++; inc_shmem_proc_count(page_number); //shmem_proc_count[page_number]++; proc->bitmap[page_number]=USERTOP-4096*i; } //cprintf("after shmem access in pid %d, shared pages = %d\n", proc->pid,proc->procShmemCount); //cprintf("address=%p\n",returnAddress,(void*)returnAddress); return (void*)(proc->bitmap[page_number]); } return NULL; }
// Load the initcode into address 0 of pgdir. // sz must be less than a page. void inituvm(pde_t *pgdir, char *init, uint sz) { char *mem; if(sz >= PGSIZE) panic("inituvm: more than a page"); mem = kalloc(); memset(mem, 0, PGSIZE); //cprintf("inituvm: page is allocated at %x\n", mem); mappages(pgdir, 0, PGSIZE, v2p(mem), UVMPDXATTR, UVMPTXATTR); memmove(mem, init, sz); }
// Load the initcode into address 0 of pgdir. sz must be less than a page. void inituvm (pgd_t *pgdir, char *init, uint sz) { char *mem; if (sz >= PTE_SZ) { panic("inituvm: more than a page"); } mem = alloc_page(); memset(mem, 0, PTE_SZ); mappages(pgdir, 0, PTE_SZ, v2p(mem), AP_RW_1_0); memmove(mem, init, sz); }
// Load the initcode into address 0 of pgdir. // sz must be less than a page. void inituvm(pde_t *pgdir, char *init, uint sz) { char *mem; if(sz >= PGSIZE) panic("inituvm: more than a page"); if ((mem = kalloc()) == 0) panic("inituvm: cannot allocate memory"); memset(mem, 0, PGSIZE); if (mappages(pgdir, 0, PGSIZE, V2P(mem), PTE_W|PTE_U) < 0) panic("inituvm: cannot create pagetable"); memmove(mem, init, sz); }
// Read a swapped-out process's user pages back in from its swap file.
// Called with ptable.lock held; the lock is dropped around file I/O
// (openKernelFile/fileread/fileclose can sleep) and reacquired, so
// callers must tolerate the table changing across this call.
void swapIn(struct proc* p)
{
  char filename[9];
  struct file* f;
  uint i;
  char* buff;
  pte_t* pte;

  getSwapFileName(p, filename);
  // File operations sleep; release the spinlock around them.
  release(&ptable.lock);
  f = openKernelFile(filename, O_RDWR);
  acquire(&ptable.lock);
  if(f == 0)
    panic("swapin: file open error\n");
  f->off = 0;
  // p->pgdir is expected to have been rebuilt by the caller
  // (a setupkvm call here is commented out in the history).
  if (!p->pgdir)
    panic("swapin: setupkvm failed\n");
  int recovered = 0;
  for (i = 0; i < p->sz; i += PGSIZE) {
    if((pte = walkpgdir(p->pgdir, (char*) i, 0)) == 0){
      // NOTE(review): intentionally empty — but the *pte dereference
      // below would then read through a null pointer.  Confirm
      // walkpgdir can never return 0 for addresses < p->sz, or skip
      // (which would also desynchronize the file offset).
    }
    // Only restore pages that are not already mapped.
    if(*pte != 0)
      continue;
    if (!(buff = kalloc()))
      panic("swapin: kalloc failed\n");
    release(&ptable.lock);
    // NOTE(review): fileread's byte count is not checked; a short
    // read would leave part of the page stale.
    fileread(f, buff, PGSIZE);
    acquire(&ptable.lock);
    if (mappages(p->pgdir, (void*) i, PGSIZE, v2p(buff), PTE_W | PTE_U) < 0)
      panic("swapin: mappages failed\n");
    recovered++;
  }
  release(&ptable.lock);
  fileclose(f);
  acquire(&ptable.lock);
}
// Set up kernel part of a page table. pde_t* setupkvm(void) { pde_t *pgdir; struct kmap *k; if ((pgdir = (pde_t*) kalloc()) == 0) return 0; memset(pgdir, 0, PGSIZE); if (p2v(PHYSTOP) > (void*) DEVSPACE) panic("PHYSTOP too high"); for (k = kmap; k < &kmap[NELEM(kmap)]; k++) if (mappages(pgdir, k->virt, k->phys_end - k->phys_start, (uint) k->phys_start, k->perm) < 0) return 0; return pgdir; }
// Set up kernel part of a page table. pde_t* setupkvm(void) { pde_t *pgdir; struct kmap *k; if((pgdir = (pde_t*)kalloc()) == 0) return 0; memset(pgdir, 0, PGSIZE); k = kmap; for(k = kmap; k < &kmap[NELEM(kmap)]; k++) if(mappages(pgdir, k->p, k->e - k->p, (uint)k->p, k->perm) < 0) return 0; return pgdir; }
// Set up the kernel half of a new top-level page map.  Returns the
// new map, or nil on allocation/mapping failure.
Pml4e *
setupkvm(void)
{
  Pml4e *pgmap = kalloc();
  Kmap *k;

  if (pgmap == nil)
    return nil;
  memzero(pgmap, PGSIZE);
  // Install every kernel mapping described by the kmap table.
  for (k = kmap; k < &kmap[nelem(kmap)]; k++)
    if (mappages(pgmap, k->addr, k->phys_end-k->phys_start,
                 k->phys_start, k->perm) < 0)
      // NOTE(review): pgmap and any partially built lower-level
      // tables leak here — confirm whether a free routine should be
      // called on this path.
      return nil;
  return pgmap;
}
// Allocate memory to the process to bring its size from oldsz to // newsz. Allocates physical memory and page table entries. oldsz and // newsz need not be page-aligned, nor does newsz have to be larger // than oldsz. Returns the new process size or 0 on error. int allocuvm(pde_t *pgdir, uint oldsz, uint newsz) { if(newsz > USERTOP) return 0; char *a = (char *)PGROUNDUP(oldsz); char *last = PGROUNDDOWN(newsz - 1); for (; a <= last; a += PGSIZE){ char *mem = kalloc(); if(mem == 0){ cprintf("allocuvm out of memory\n"); deallocuvm(pgdir, newsz, oldsz); return 0; } memset(mem, 0, PGSIZE); mappages(pgdir, a, PGSIZE, PADDR(mem), PTE_W|PTE_U); } return newsz > oldsz ? newsz : oldsz; }
// Map shared page n into the current process just below its
// shared-memory boundary (shmembd), reusing an existing mapping if
// present.  Returns the virtual address, or 0 on error.
uint
shmemget(uint n)
{
  uint addr;

  if(n >= SHMEMPGNO)
    return 0;
  if(proc->shmem[n] != 0)          // already mapped for this process
    return proc->shmem[n];

  addr = proc->shmembd - PGSIZE;   // next free page below the boundary
  if(addr < proc->sz)              // would collide with the heap
    return 0;
  // Only commit the bookkeeping once the mapping succeeds; the
  // original updated shmem/shmembd/shmemusr even when mappages
  // failed.
  if(mappages(proc->pgdir, (char*)addr, PGSIZE, shmem[n],
              PTE_W|PTE_U|PTE_S) < 0)
    return 0;
  proc->shmem[n] = addr;
  proc->shmembd = addr;
  shmemusr[n]++;
  return addr;
}
// Given a parent process's page table, create a copy // of it for a child. pgd_t* copyuvm (pgd_t *pgdir, uint sz) { pgd_t *d; pte_t *pte; uint64 pa, i, ap; char *mem; // allocate a new first level page directory d = kpt_alloc(); if (d == NULL ) { return NULL ; } // copy the whole address space over (no COW) for (i = 0; i < sz; i += PTE_SZ) { if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) { panic("copyuvm: pte should exist"); } if (!(*pte & (ENTRY_PAGE | ENTRY_VALID))) { panic("copyuvm: page not present"); } pa = PTE_ADDR (*pte); ap = PTE_AP (*pte); if ((mem = alloc_page()) == 0) { goto bad; } memmove(mem, (char*) p2v(pa), PTE_SZ); if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) { goto bad; } } return d; bad: freevm(d); return 0; }
// Load the initcode into address 0 of pgdir. // sz must be less than a page. // CHANGE inituvm now starts at 4096, adds an extra // NULL page that will cause pagefault // with gen_null_page void inituvm(pde_t *pgdir, char *init, uint sz) { char *mem; if(sz >= PGSIZE) panic("inituvm: more than a page"); //gen_null_page(pgdir); if ((mem = kalloc()) == 0) panic("inituvm: cannot allocate memory"); memset(mem, 0, PGSIZE); if (mappages(pgdir, (char *)PGSIZE, PGSIZE, v2p(mem), PTE_W|PTE_U) < 0) panic("inituvm: cannot create pagetable"); memmove(mem, init, sz); acquire(&r_c.lock); r_c.ref_count[v2p(mem) / 4096] = 1; release(&r_c.lock); }
Pml4e * copyuvm(Pml4e *oldmap, usize sz) { uintptr a; Pml4e *newmap; Pte *pte; uchar *oldmem, *newmem; uint flags; newmap = setupkvm(); if (newmap == nil) return nil; for (a = 0; a < sz; a += PGSIZE) { pte = walkpgmap(oldmap, (void *)a, 0); if (pte == nil) panic("copyuvm - nil pte"); if (!*pte & PTE_P) panic("copyuvm - page not present"); oldmem = p2v(pte_addr(*pte)); flags = pte_flags(*pte); newmem = kalloc(); if (newmem == nil) goto bad; memmove(newmem, oldmem, PGSIZE); if (mappages(newmap, (void *)a, PGSIZE, v2p(newmem), flags) < 0) goto bad; } return newmap; bad: freeuvm(newmap); return nil; }
void userinit(void) { struct proc *p; extern char _binary_kernel_initcode_start[], _binary_kernel_initcode_size[]; char *mem; p = allocproc(); assert(p); initproc = p; if((p->pgdir = setupkvm(kalloc)) == NULL) panic("userinit: out of memory?"); if ((int)_binary_kernel_initcode_size > PGSIZE) panic("inituvm: initcode more than a page"); mem = kalloc(); memset(mem, 0, PGSIZE); mappages(p->pgdir, 0, PGSIZE, V2P(mem), PTE_W|PTE_U, kalloc); memcpy(mem, (char *)_binary_kernel_initcode_start, (int)_binary_kernel_initcode_size); safestrcpy(p->name, "initcode", sizeof(p->name)); p->brk = PGSIZE; memset(p->tf, 1, sizeof(*p->tf)); p->tf->cs = (SEG_UCODE << 3) | DPL_USER; p->tf->ss = (SEG_UDATA << 3) | DPL_USER; p->tf->ds = p->tf->es = p->tf->fs = p->tf->gs = p->tf->ss; p->tf->eflags = FL_IF; p->tf->esp = PGSIZE; p->tf->eip = 0; // beginning of initcode p->counter = p->priority = 10; p->state = RUNNABLE; }
// Allocate page tables and physical memory to grow process from oldsz to // newsz, which need not be page aligned. Returns new size or 0 on error. int allocuvm(pde_t *pgdir, uint oldsz, uint newsz) { char *mem; uint a; if (newsz >= KERNBASE) return 0; if (newsz < oldsz) return oldsz; a = PGROUNDUP(oldsz); for (; a < newsz; a += PGSIZE) { mem = kalloc(); if (mem == 0) { deallocuvm(pgdir, newsz, oldsz); return 0; } memset(mem, 0, PGSIZE); mappages(pgdir, (char*) a, PGSIZE, v2p(mem), PTE_W | PTE_U); #ifndef NONE /* a&k start */ if (isNotInitShell(proc)) { int ans = 0; if ((proc->swapData).nPhysicalPages >= MAX_PSYC_PAGES) ans = swapOut(pgdir); if (ans < 0) panic("Can't swap out!"); #if defined(FIFO) || defined(SCFIFO) int index = addMemAddr(PGROUNDDOWN(a)); (proc->swapData).creationTime[index] = ticks; #endif #ifdef NFU addMemAddr(PGROUNDDOWN(a)); #endif } /* a&k end */ #endif } return newsz; }
// Allocate page tables and physical memory to grow process from oldsz to // newsz, which need not be page aligned. Returns new size or 0 on error. int allocuvm(pde_t *pgdir, uint oldsz, uint newsz) { char *mem; uint a; if((newsz >= KERNBASE)||( newsz >= (uint)(p2v(PHYSTOP) - proc->ssm))) return 0; if(newsz < oldsz) return oldsz; a = PGROUNDUP(oldsz); for(; a < newsz; a += PGSIZE){ mem = kalloc(); if(mem == 0){ cprintf("allocuvm out of memory\n"); deallocuvm(pgdir, newsz, oldsz); return 0; } memset(mem, 0, PGSIZE); mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U); } return newsz; }
// Allocate page tables and physical memory to grow process from oldsz to // newsz, which need not be page aligned. Returns new size or 0 on error. int allocuvm(pde_t *pgdir, uint oldsz, uint newsz)//allocates memory from oldsz to newsz { char *mem; uint a; if(newsz > USERTOP) return 0; if(newsz < oldsz) return oldsz; a = PGROUNDUP(oldsz); for(; a < newsz; a += PGSIZE){ mem = kalloc();//gets physical memory to map to the address space if(mem == 0){ cprintf("allocuvm out of memory\n"); deallocuvm(pgdir, newsz, oldsz); return 0; } memset(mem, 0, PGSIZE); mappages(pgdir, (char*)a, PGSIZE, PADDR(mem), PTE_W|PTE_U);//virtual address maps 'a' to the physical memory mem and writable in address space } return newsz; }
// Allocate page tables and physical memory to grow process from oldsz to // newsz, which need not be page aligned. Returns new size or 0 on error. int allocuvm(pde_t *pgdir, uint oldsz, uint newsz) { char *mem; uint a; if(newsz >= USERBOUND) return 0; if(newsz < oldsz) return oldsz; a = PGROUNDUP(oldsz); for(; a < newsz; a += PGSIZE){ mem = kalloc(); if(mem == 0){ cprintf("allocuvm out of memory\n"); deallocuvm(pgdir, newsz, oldsz); return 0; } memset(mem, 0, PGSIZE); mappages(pgdir, (char*)a, PGSIZE, v2p(mem), UVMPDXATTR, UVMPTXATTR); } return newsz; }
void* shmem_access(int page_number) { if ( page_number < 0 || page_number >3 ) return NULL; if (proc->shmem[page_number] != NULL) { return (void*)proc->shmem[page_number]; } proc->acsshmem++; if ((USERTOP-PGSIZE*proc->acsshmem) <= proc->sz) return NULL; void* va; va = (void*)(USERTOP-PGSIZE*proc->acsshmem); mappages( proc->pgdir, va, PGSIZE, (unsigned int)shmem_addr[page_number], PTE_W|PTE_U); proc->shmem[page_number] = (int)va; shmemcount[page_number]++; return va; //map the physical address into the virtual address space from the high end //return the virtual address to the caller; }
// Map shared physical page page_number (0..3) into one of the top
// four pages of the current process's address space and return the
// chosen virtual address, or NULL if the index is invalid or no free
// slot remains.
void* shmem_access(int page_number)
{
  if(page_number<0||page_number>3)
  {
    return NULL;
  }
  // Physical address of the shared page to map.
  void* sharedPhyAddr=shmem_addr[page_number];
  // Slot index: 1 = last page below USERTOP, 2 = second-to-last, ...
  int i=1;
  void* VA;
  pde_t *pde;  // NOTE(review): walkpgdir returns a page-table entry, so
               // this is really a pte; the name is misleading.
  cprintf("before shmem access in pid %d, shared pages = %d\n", proc->pid,proc->procShmemCount);
  // Find the first of the top four pages whose PTE_P bit is clear.
  for(i=1; i<=4; i++)
  {
    VA=(void*)(USERTOP-4096*i);
    pde=walkpgdir(proc->pgdir,VA,0);
    cprintf("i=%d\n",i);
    // NOTE(review): walkpgdir can return 0 when the page table for
    // this range is absent; *pde would then be a null dereference —
    // confirm the directory is always populated here.
    if(~*pde&PTE_P)  // present bit clear => slot is free
    {
      cprintf("page found number=%d\n",i);
      break;
    }
  }
  if(i==5)  // no free slot among the top four pages
  {
    return NULL;
  }
  else
  {
    // Map the free slot to the shared physical page.
    // NOTE(review): mappages' return value is unchecked, and
    // procShmemCount is overwritten with the slot index rather than
    // incremented — verify both against the intended semantics.
    mappages(proc->pgdir,(void*)(USERTOP-4096*i),(uint)PGSIZE,(uint)sharedPhyAddr,PTE_W|PTE_U);
    proc->procShmemCount=i;
    int returnAddress=USERTOP-4096*i;
    cprintf("after shmem access in pid %d, shared pages = %d\n", proc->pid,proc->procShmemCount);
    // NOTE(review): this passes two arguments for a single %p.
    cprintf("address=%p\n",returnAddress,(void*)returnAddress);
    return (void*)returnAddress;
  }
  return NULL;  // unreachable: both branches above return
}
//PAGEBREAK: 41 void trap(struct trapframe *tf) { if(tf->trapno == T_SYSCALL){ if(proc->killed) exit(); proc->tf = tf; syscall(); if(proc->killed) exit(); return; } // Lazy page allocation if(tf->trapno == T_PGFLT) { uint a = PGROUNDDOWN(rcr2()); // round down faulting VA to page boundary char *mem; mem = kalloc(); memset(mem, 0, PGSIZE); //cprintf("Lazy page allocation at 0x%x\n", a); mappages(proc->pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U); return; } switch(tf->trapno){ case T_IRQ0 + IRQ_TIMER: if(cpu->id == 0){ acquire(&tickslock); ticks++; if(proc && (tf->cs & 3) == 3){ proc->alarmleft--; if(proc->alarmleft == 0){ proc->alarmleft = proc->alarmticks; tf->esp -= 4; *(uint*)tf->esp = tf->eip; // XXX: security flaw, need check before tf->eip = (uint) proc->alarmhandler; } } wakeup(&ticks); release(&tickslock); } lapiceoi(); break; case T_IRQ0 + IRQ_IDE: ideintr(); lapiceoi(); break; case T_IRQ0 + IRQ_IDE+1: // Bochs generates spurious IDE1 interrupts. break; case T_IRQ0 + IRQ_KBD: kbdintr(); lapiceoi(); break; case T_IRQ0 + IRQ_COM1: uartintr(); lapiceoi(); break; case T_IRQ0 + 7: case T_IRQ0 + IRQ_SPURIOUS: cprintf("cpu%d: spurious interrupt at %x:%x\n", cpu->id, tf->cs, tf->eip); lapiceoi(); break; //PAGEBREAK: 13 default: if(proc == 0 || (tf->cs&3) == 0){ // In kernel, it must be our mistake. cprintf("unexpected trap %d from cpu %d eip %x (cr2=0x%x)\n", tf->trapno, cpu->id, tf->eip, rcr2()); panic("trap"); } // In user space, assume process misbehaved. cprintf("pid %d %s: trap %d err %d on cpu %d " "eip 0x%x addr 0x%x--kill proc\n", proc->pid, proc->name, tf->trapno, tf->err, cpu->id, tf->eip, rcr2()); proc->killed = 1; } // Force process exit if it has been killed and is in user space. // (If it is still executing in the kernel, let it keep running // until it gets to the regular system call return.) if(proc && proc->killed && (tf->cs&3) == DPL_USER) exit(); // Force process to give up CPU on clock tick. 
// If interrupts were on while locks held, would need to check nlock. if(proc && proc->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER) yield(); // Check if the process has been killed since we yielded if(proc && proc->killed && (tf->cs&3) == DPL_USER) exit(); }
// 1:1 map the memory [phy_low, phy_hi] in kernel. We need to // use 2-level mapping for this block of memory. The rumor has // it that ARMv6's small brain cannot handle the case that memory // be mapped in both 1-level page table and 2-level page. For // initial kernel, we use 1MB mapping, other memory needs to be // mapped as 4KB pages void paging_init (uint64 phy_low, uint64 phy_hi) { mappages (P2V(&_kernel_pgtbl), P2V(phy_low), phy_hi - phy_low, phy_low, AP_RW_1_0); flush_tlb (); }