/* destroy all pages in physical memory and all swapped pages */
void
as_destroy(struct addrspace *as)
{
	int spl = splhigh();
	int i;

	//free all coremap entries owned by this address space
	for (i = 0; i < num_frames; i++) {
		if (coremap[i].state != FREE && coremap[i].addrspace == as) {
			coremap[i].addrspace = NULL;
			coremap[i].mapped_vaddr = 0;
			coremap[i].state = FREE;
			coremap[i].num_pages_allocated = 0;
		}
	}

	// free all pages in the swap file, region by region
	for (i = 0; i < array_getnum(as->as_regions); i++) {
		struct as_region *cur =
			(struct as_region *)array_getguy(as->as_regions, i);
		int j;

		assert(cur->vbase % PAGE_SIZE == 0);

		// destroy all pages belonging to this region
		for (j = 0; j < cur->npages; j++) {
			vaddr_t page = cur->vbase + j * PAGE_SIZE;
			u_int32_t *pte;

			assert((page & PAGE_FRAME) == page);
			pte = get_PTE_from_addrspace(as, page);

			// if this page is not resident but is in the swap
			// file, release its swap slot
			if ((*pte & PTE_PRESENT) == 0 &&
			    (*pte & PTE_SWAPPED) != 0) {
				off_t file_slot =
					(*pte & SWAPFILE_OFFSET) >> 12;

				// the occupied bit must be set
				assert(bitmap_isset(swapfile_map, file_slot) != 0);
				bitmap_unmark(swapfile_map, file_slot);
			}
		}
	}

	splx(spl);
}
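/*
 * A hedged sketch of the PTE layout the loop above assumes. The exact
 * bit positions are illustrative, inferred from the masks and the
 * ">> 12" shift used here, not taken from this codebase: the top 20
 * bits hold either the physical frame number or the swap-file slot,
 * and low bits carry the PRESENT/SWAPPED flags.
 */
#define PTE_PRESENT		0x00000001	/* page resident in memory */
#define PTE_SWAPPED		0x00000002	/* page lives in swap file */
#define SWAPFILE_OFFSET		0xfffff000	/* swap slot, shifted left 12 */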
/*
 * Kill all sleeping threads. This is used during panic shutdown to make
 * sure they don't wake up again and interfere with the panic.
 */
static
void
thread_killall(void)
{
	int i, result;

	assert(curspl > 0);

	/*
	 * Move all sleepers to the zombie list, to be sure they don't
	 * wake up while we're shutting down.
	 */

	for (i = 0; i < array_getnum(sleepers); i++) {
		struct thread *t = array_getguy(sleepers, i);
		kprintf("sleep: Dropping thread %s\n", t->t_name);

		/*
		 * Don't do this: because these threads haven't
		 * been through thread_exit, thread_destroy will
		 * get upset. Just drop the threads on the floor,
		 * which is safer anyway during panic.
		 *
		 * array_add(zombies, t);
		 */
	}

	result = array_setsize(sleepers, 0);
	/* shrinking array: not supposed to fail */
	assert(result == 0);
}
/*
 * ASST1: Like thread_wakeup, but wake up at most one thread
 * sleeping on "sleep address" ADDR.
 */
void
thread_wakeone(const void *addr)
{
	int i, result;

	// meant to be called with interrupts off
	assert(curspl > 0);

	// This is inefficient. Feel free to improve it.
	for (i = 0; i < array_getnum(sleepers); i++) {
		struct thread *t = array_getguy(sleepers, i);
		if (t->t_sleepaddr == addr) {
			// Remove from list
			array_remove(sleepers, i);

			/*
			 * Because we preallocate during thread_fork,
			 * this should never fail.
			 */
			result = make_runnable(t);
			assert(result == 0);
			break;
		}
	}
}
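/*
 * A minimal usage sketch (hypothetical, not from this codebase): a
 * one-slot handoff where a consumer sleeps on the slot's address and
 * the producer wakes at most one waiter. Assumes thread_sleep() from
 * the stock OS/161 kernel and interrupts disabled around each
 * sleep/wake pair, as the asserts above require.
 */
static volatile int slot_full = 0;

static
void
consume_one(void)
{
	int spl = splhigh();
	while (!slot_full) {
		thread_sleep(&slot_full);	/* sleep on &slot_full */
	}
	slot_full = 0;
	splx(spl);
}

static
void
produce_one(void)
{
	int spl = splhigh();
	slot_full = 1;
	thread_wakeone(&slot_full);		/* wake at most one consumer */
	splx(spl);
}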
pid_t
newprocess(pid_t parent)
{
	struct process *newproc;
	pid_t newpid;
	struct cv *newcv;

	newproc = (struct process *)kmalloc(sizeof(struct process));
	if (newproc == NULL) {
		return (pid_t)-ENOMEM;
	}

	newcv = cv_create("childexit");
	if (newcv == NULL) {
		kfree(newproc);
		return (pid_t)-ENOMEM;
	}

	newproc->filetable = NULL;
	newproc->parentpid = parent;
	newproc->childexit = newcv;
	newproc->exited = 0;

	lock_acquire(proctable_lock);
	newpid = array_getnum(proctable);
	if (array_add(proctable, newproc)) {
		lock_release(proctable_lock);
		cv_destroy(newcv);
		kfree(newproc);
		return (pid_t)-ENOMEM;
	}
	lock_release(proctable_lock);

	/* pids are 1-based: slot i of proctable holds pid i+1 */
	return newpid + 1;
}
int
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
{
	struct page *p;
	size_t npages;
	size_t curpage;
	struct uio ku;
	vaddr_t maxvaddr;
	vaddr_t lowerbound;
	int i;
	int result;
	unsigned int rval;
	vaddr_t stacktop;

	*stackptr = USERTOP;

	/* Do stack ASLR */
	if (randvnode != NULL) {
		mk_kuio(&ku, &rval, sizeof(rval), 0, UIO_READ);
		result = VOP_READ(randvnode, &ku);
		if (result) {
			return result;
		}

		/* Find the highest page already in the address space. */
		maxvaddr = (vaddr_t)0;
		for (i = 0; i < array_getnum(as->pages); i++) {
			p = (struct page *)array_getguy(as->pages, i);
			if (p->vaddr > maxvaddr) {
				maxvaddr = p->vaddr;
			}
		}

		/* Leave room for the full stack plus one extra page. */
		lowerbound = maxvaddr + (STACKSIZE * PAGE_SIZE) + PAGE_SIZE;

		/* Pick a page-aligned stack top in [lowerbound, USERTOP). */
		rval %= USERTOP - lowerbound;
		*stackptr = (lowerbound + rval) & PAGE_FRAME;
	}

	npages = (size_t)STACKSIZE;
	stacktop = *stackptr - PAGE_SIZE * npages;

	for (curpage = 0; curpage < npages; curpage++) {
		p = (struct page *)kmalloc(sizeof(struct page));
		if (p == NULL) {
			return ENOMEM;
		}
		p->vaddr = stacktop + curpage * PAGE_SIZE;
		p->perms = P_R_B | P_W_B;
		array_add(as->pages, p);
		addpage(p->vaddr, curthread->t_pid,
			p->perms & P_R_B, p->perms & P_W_B,
			p->perms & P_X_B, NULL);
	}

	return 0;
}
/*
 * as_fault: fault handling. Handle a fault on an address space, of
 * specified type, at specified address.
 *
 * Synchronization: none. We assume the address space is not shared,
 * so we don't lock it.
 */
int
as_fault(struct addrspace *as, int faulttype, vaddr_t va)
{
	struct vm_object *faultobj = NULL;
	struct lpage *lp;
	vaddr_t bot = 0, top;
	int i, index, result;

	/* Find the vm_object concerned */
	for (i = 0; i < array_getnum(as->as_objects); i++) {
		struct vm_object *vmo;

		vmo = array_getguy(as->as_objects, i);
		bot = vmo->vmo_base;
		top = bot + PAGE_SIZE*array_getnum(vmo->vmo_lpages);
		if (va >= bot && va < top) {
			faultobj = vmo;
			break;
		}
	}

	if (faultobj == NULL) {
		DEBUG(DB_VM, "vm_fault: EFAULT: va=0x%x\n", va);
		return EFAULT;
	}

	/* Now get the logical page */
	index = (va - bot) / PAGE_SIZE;
	lp = array_getguy(faultobj->vmo_lpages, index);

	if (lp == NULL) {
		/* zerofill page */
		result = lpage_zerofill(&lp);
		if (result) {
			kprintf("vm: zerofill fault at 0x%x failed\n", va);
			return result;
		}
		array_setguy(faultobj->vmo_lpages, index, lp);
	}

	return lpage_fault(lp, as, faulttype, va);
}
/*
 * ft_size()
 * Returns how many file descriptors the thread actually has open
 * (non-NULL entries), as opposed to the size of the underlying array.
 */
int
ft_size(struct filetable *ft)
{
	int total, i;

	assert(ft != NULL);

	total = array_getnum(ft->filedescriptor);
	for (i = 0; i < ft_array_size(ft); i++) {
		if (ft_get(ft, i) == NULL) {
			total--;
		}
	}

	return total;
}
/*
 * as_destroy: wipe out an address space by destroying its components.
 * Synchronization: none.
 */
void
as_destroy(struct addrspace *as)
{
	struct vm_object *vmo;
	int i;

	for (i = 0; i < array_getnum(as->as_objects); i++) {
		vmo = array_getguy(as->as_objects, i);
		vm_object_destroy(as, vmo);
	}

	array_destroy(as->as_objects);
	kfree(as);
}
static
void
checksender(u_int16_t addr, struct sockaddr_un *rsun, socklen_t rlen)
{
	int n, i;
	struct sender *sdr;
	int pathlen;

	assert(senders != NULL);
	assert(rsun != NULL);
	assert(addr != BROADCAST_ADDR);

	/* If we already know this sender, just refresh its address. */
	n = array_getnum(senders);
	for (i = 0; i < n; i++) {
		sdr = array_getguy(senders, i);
		assert(sdr != NULL);
		if (sdr->sdr_addr == addr) {
			memcpy(&sdr->sdr_sun, rsun, sizeof(*rsun));
			sdr->sdr_len = rlen;
			return;
		}
	}

	/* New sender: allocate and record it. */
	sdr = malloc(sizeof(struct sender));
	if (!sdr) {
		fprintf(stderr, "hub161: out of memory\n");
		exit(1);
	}

	/* The path length is what's left after the sockaddr header. */
	pathlen = rlen - (sizeof(*rsun) - sizeof(rsun->sun_path));
	printf("hub161: adding %04x from %.*s\n", addr, pathlen,
	       rsun->sun_path);
	if (rsun->sun_path[0] != '/') {
		printf("hub161: (not absolute pathname, may not work)\n");
	}

	sdr->sdr_addr = addr;
	memcpy(&sdr->sdr_sun, rsun, sizeof(*rsun));
	sdr->sdr_len = rlen;
	sdr->sdr_errors = 0;

	if (array_add(senders, sdr)) {
		fprintf(stderr, "hub161: Out of memory\n");
		exit(1);
	}
}
/*
 * Return nonzero if there are any threads who are sleeping on "sleep address"
 * ADDR. This is meant to be used only for diagnostic purposes.
 */
int
thread_hassleepers(const void *addr)
{
	int i;

	// meant to be called with interrupts off
	assert(curspl > 0);

	for (i = 0; i < array_getnum(sleepers); i++) {
		struct thread *t = array_getguy(sleepers, i);
		if (t->t_sleepaddr == addr) {
			return 1;
		}
	}

	return 0;
}
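/*
 * Hypothetical diagnostic use (a sketch, not from this codebase): a
 * destroy routine can assert that nobody is still sleeping on the
 * object before freeing it, mirroring what the stock OS/161
 * sem_destroy() does.
 */
static
void
sem_checked_destroy(struct semaphore *sem)
{
	int spl = splhigh();
	assert(thread_hassleepers(sem) == 0);
	splx(spl);
	sem_destroy(sem);
}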
/*
 * as_copy: duplicate an address space. Creates a new address space and
 * copies each vm_object in the source address space into the new one.
 * Implements the VM system part of fork().
 *
 * Synchronization: none.
 */
int
as_copy(struct addrspace *as, struct addrspace **ret)
{
	struct addrspace *newas;
	struct vm_object *vmo, *newvmo;
	int i, result;

	newas = as_create();
	if (newas == NULL) {
		return ENOMEM;
	}

	/*
	 * We assume that as belongs to curthread, and furthermore that
	 * it's not shared with any other threads. (The latter restriction
	 * is easily lifted; the former is not.)
	 *
	 * We assume that nothing is going to modify the source address
	 * space except for the usual page evictions by other processes.
	 */
	assert(as == curthread->t_vmspace);

	/* copy the vmos */
	for (i = 0; i < array_getnum(as->as_objects); i++) {
		vmo = array_getguy(as->as_objects, i);

		result = vm_object_copy(vmo, newas, &newvmo);
		if (result) {
			goto fail;
		}

		result = array_add(newas->as_objects, newvmo);
		if (result) {
			vm_object_destroy(newas, newvmo);
			goto fail;
		}
	}

	*ret = newas;
	return 0;

 fail:
	as_destroy(newas);
	return result;
}
/*
 * Unmount code.
 *
 * VFS calls FS_SYNC on the filesystem prior to unmounting it.
 *
 * Locking: gets sfs_vnlock, then sfs_bitlock.
 */
static
int
sfs_unmount(struct fs *fs)
{
	struct sfs_fs *sfs = fs->fs_data;

	lock_acquire(sfs->sfs_vnlock);
	lock_acquire(sfs->sfs_bitlock);

	/* Do we have any files open? If so, can't unmount. */
	if (array_getnum(sfs->sfs_vnodes) > 0) {
		lock_release(sfs->sfs_vnlock);
		lock_release(sfs->sfs_bitlock);
		return EBUSY;
	}

	/*
	 * We should have just had sfs_sync called.
	 * The VFS locking prevents anyone from opening any files on the
	 * fs before we get here - in order to open any files, one would
	 * have to go through the volume/device name stuff in vfslist.c,
	 * and it's locked during the sync/unmount.
	 */
	assert(sfs->sfs_superdirty == 0);
	assert(sfs->sfs_freemapdirty == 0);

	/* Once we start nuking stuff we can't fail. */
	array_destroy(sfs->sfs_vnodes);
	bitmap_destroy(sfs->sfs_freemap);

	/* The vfs layer takes care of the device for us */
	(void)sfs->sfs_device;

	/* Free the lock. VFS guarantees we can do this safely */
	lock_release(sfs->sfs_vnlock);
	lock_release(sfs->sfs_bitlock);
	lock_destroy(sfs->sfs_vnlock);
	lock_destroy(sfs->sfs_bitlock);

	/* Destroy the fs object */
	kfree(sfs);

	/* nothing else to do */
	return 0;
}
/* place all entries into the pagetable */
int
as_prepare_load(struct addrspace *as)
{
	struct page *p;
	int i;
	int num = array_getnum(as->pages);

	for (i = 0; i < num; i++) {
		p = (struct page *)array_getguy(as->pages, i);
		// enable all permissions so the segment can be written in
		addpage(p->vaddr, curthread->t_pid, 1, 1, 1, NULL);
	}

	return 0;
}
/*
 * Remove zombies. (Zombies are threads/processes that have exited but not
 * been fully deleted yet.)
 */
static
void
exorcise(void)
{
	int i, result;

	assert(curspl > 0);

	for (i = 0; i < array_getnum(zombies); i++) {
		struct thread *z = array_getguy(zombies, i);
		assert(z != curthread);
		thread_destroy(z);
	}

	result = array_setsize(zombies, 0);
	/* Shrinking the array; not supposed to be able to fail. */
	assert(result == 0);
}
/* calls md_freetld */
void
as_destroy(struct addrspace *as)
{
	struct page *p;
	int i;

	for (i = 0; i < array_getnum(as->pages); i++) {
		p = (struct page *)array_getguy(as->pages, i);
		invalidatepage(p->vaddr);
		kfree(p);
	}

	invalidateswapentries(curthread->t_pid);

	array_destroy(as->pages);
	kfree(as);
}
int
sys_close(int fd)
{
	struct sys_filemapping *mpg;
	struct process *proc;
	proc_filemapping *pmpg;
	int sys_index;

	mpg = resolvefd(fd);
	if (mpg == NULL) {
		return -EBADF;
	}

	lock_acquire(filetable_lock);

	mpg->refcnt--;

	proc = getcurprocess();

	/* should never fail seeing how we just resolved it */
	pmpg = (proc_filemapping *)array_getguy(proc->filetable, fd);
	sys_index = *pmpg;
	kfree(pmpg);

	array_setguy(proc->filetable, fd, NULL);

	if (mpg->refcnt > 0) {
		lock_release(filetable_lock);
		return 0;
	}

	/* no more references to mapping */
	vfs_close(mpg->vn);
	kfree(mpg);

	if (sys_index < array_getnum(proc->filetable)) {
		array_setguy(proc->filetable, sys_index, NULL);
	}

	lock_release(filetable_lock);
	return 0;
}
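/*
 * A hedged sketch of the two-level file table sys_close() implies.
 * The typedef matches how the code dereferences pmpg as an integer;
 * the struct members beyond vn and refcnt are assumptions, not taken
 * from this codebase: each per-process slot holds an index into a
 * system-wide table of shared open-file objects.
 */
typedef int proc_filemapping;		/* index into the system table */

struct sys_filemapping {
	struct vnode *vn;		/* the open file */
	int flags;			/* open mode (assumed) */
	off_t offset;			/* file position (assumed) */
	int refcnt;			/* descriptors sharing this entry */
};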
void
addrspace_dump(struct addrspace *as)
{
	struct page *p;
	int num;
	int i;

	num = array_getnum(as->pages);

	kprintf("+-ADDRSPACE------+\n");
	for (i = 0; i < num; i++) {
		p = (struct page *)array_getguy(as->pages, i);
		kprintf("| %08x | %c%c%c |\n", p->vaddr,
			p->perms & P_R_B ? 'r' : '-',
			p->perms & P_W_B ? 'w' : '-',
			p->perms & P_X_B ? 'x' : '-');
	}
}
int
as_complete_load(struct addrspace *as)
{
	int i;
	struct page *p;
	int num = array_getnum(as->pages);

	/* update permissions on all pages */
	for (i = 0; i < num; i++) {
		p = (struct page *)array_getguy(as->pages, i);
		sys_mprotect(p->vaddr, PAGE_SIZE,
			     (p->perms & P_R_B ? PROT_READ : 0) |
			     (p->perms & P_W_B ? PROT_WRITE : 0) |
			     (p->perms & P_X_B ? PROT_EXEC : 0));
	}

	return 0;
}
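/*
 * A minimal sketch of the load sequence these two hooks assume,
 * mirroring the stock OS/161 load_elf() flow. The function name is
 * illustrative; the segment-reading step is elided.
 */
static
int
load_program_sketch(struct addrspace *as, struct vnode *v)
{
	int result;

	(void)v;

	/* Open everything up so the loader can write the segments. */
	result = as_prepare_load(as);
	if (result) {
		return result;
	}

	/* ... read each ELF segment into the address space here ... */

	/* Restore the real per-page permissions recorded at define time. */
	return as_complete_load(as);
}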
struct process *
getprocess(pid_t pid)
{
	struct process *proc;

	lock_acquire(proctable_lock);

	/* pids are 1-based; convert to a 0-based proctable index */
	pid -= 1;
	if ((pid >= array_getnum(proctable)) || (pid < 0)) {
		proc = NULL;
	}
	else {
		proc = (struct process *)array_getguy(proctable, (int)pid);
	}

	lock_release(proctable_lock);
	return proc;
}
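/*
 * A hypothetical round-trip illustrating the pid convention above:
 * newprocess() returns (slot index + 1) and getprocess() subtracts 1
 * before indexing, so the two compose cleanly. Assumes a t_pid field
 * on curthread, as in the other snippets here.
 */
static
void
pid_roundtrip_example(void)
{
	pid_t pid = newprocess(curthread->t_pid);
	if (pid > 0) {
		struct process *proc = getprocess(pid);
		assert(proc != NULL);
		assert(proc->parentpid == curthread->t_pid);
	}
}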
int
as_copy(struct addrspace *old, struct addrspace **ret, pid_t pid)
{
	struct addrspace *newas;
	struct page *newpage, *page;
	int i;
	int oindex;
	paddr_t of;

	newas = as_create();
	if (newas == NULL) {
		return ENOMEM;
	}

	for (i = 0; i < array_getnum(old->pages); i++) {
		newpage = (struct page *)kmalloc(sizeof(struct page));
		if (newpage == NULL) {
			return ENOMEM;
		}

		page = (struct page *)array_getguy(old->pages, i);
		memcpy(newpage, page, sizeof(struct page));
		array_add(newas->pages, newpage);

		/* copy the frame contents via its kernel-virtual alias */
		oindex = getindex(newpage->vaddr);
		of = FRAME(oindex);
		addpage(newpage->vaddr, pid,
			newpage->perms & P_R_B, newpage->perms & P_W_B,
			newpage->perms & P_X_B, PADDR_TO_KVADDR(of));
	}

	*ret = newas;
	return 0;
}
static
void
killsenders(void)
{
	struct sender *sdr;
	int n, i;

	assert(senders != NULL);

	n = array_getnum(senders);
	for (i = 0; i < n; i++) {
		sdr = array_getguy(senders, i);
		assert(sdr != NULL);
		if (sdr->sdr_errors > 5) {
			printf("hub161: dropping %04x\n", sdr->sdr_addr);
			array_remove(senders, i);
			i--;
			n--;
			free(sdr);
		}
	}
}
static
void
dosend(const char *pkt, size_t len)
{
	struct sender *sdr;
	int r, n, i;

	assert(senders != NULL);
	assert(pkt != NULL);

	n = array_getnum(senders);
	for (i = 0; i < n; i++) {
		sdr = array_getguy(senders, i);
		assert(sdr != NULL);
		r = sendto(sock, pkt, len, 0,
			   (struct sockaddr *)&sdr->sdr_sun, sdr->sdr_len);
		if (r < 0) {
			fprintf(stderr, "hub161: sendto %04x: %s\n",
				sdr->sdr_addr, strerror(errno));
			sdr->sdr_errors++;
		}
	}
}
/*
 * Set up a segment at virtual address VADDR of size MEMSIZE. The
 * segment in memory extends from VADDR up to (but not including)
 * VADDR+MEMSIZE.
 *
 * The READABLE, WRITEABLE, and EXECUTABLE flags are set if read,
 * write, or execute permission should be set on the segment. At the
 * moment, these are ignored.
 *
 * Does not allow overlapping regions.
 */
int
as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
		 size_t lower_redzone,
		 int readable, int writeable, int executable)
{
	struct vm_object *vmo;
	int i, result;
	vaddr_t check_vaddr;	/* vaddr to use for overlap check */

	(void)readable;
	(void)writeable;	// XYZ
	(void)executable;

	/* base address must be aligned */
	assert((vaddr & PAGE_FRAME) == vaddr);

	/* redzone must be aligned */
	assert((lower_redzone & PAGE_FRAME) == lower_redzone);

	/* redzone must fit */
	assert(vaddr >= lower_redzone);
	check_vaddr = vaddr - lower_redzone;

	/* size may not be aligned; round it up */
	sz = ROUNDUP(sz, PAGE_SIZE);

	/*
	 * Check for overlaps.
	 */
	for (i = 0; i < array_getnum(as->as_objects); i++) {
		vaddr_t bot, top;

		vmo = array_getguy(as->as_objects, i);
		assert(vmo != NULL);
		bot = vmo->vmo_base;
		top = bot + PAGE_SIZE*array_getnum(vmo->vmo_lpages);

		/* Check guard band, if any */
		assert(bot >= vmo->vmo_lower_redzone);
		bot = bot - vmo->vmo_lower_redzone;

		if (check_vaddr + sz > bot && check_vaddr < top) {
			/* overlap */
			return EINVAL;
		}
	}

	/* Create a new vmo. All pages are marked zerofilled. */
	vmo = vm_object_create(sz/PAGE_SIZE);
	if (vmo == NULL) {
		return ENOMEM;
	}
	vmo->vmo_base = vaddr;
	vmo->vmo_lower_redzone = lower_redzone;

	/* Add it to the parent address space. */
	result = array_add(as->as_objects, vmo);
	if (result) {
		vm_object_destroy(as, vmo);
		return result;
	}

	/* Done */
	return 0;
}
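/*
 * A hypothetical call defining a two-page read/write data segment with
 * a one-page redzone below it. The address is illustrative; note the
 * permission flags are currently ignored by as_define_region().
 */
static
int
define_data_segment_example(struct addrspace *as)
{
	return as_define_region(as, (vaddr_t)0x400000, 2 * PAGE_SIZE,
				PAGE_SIZE, 1, 1, 0);
}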
static
int
sfs_sync(struct fs *fs)
{
	struct sfs_fs *sfs;
	int i, num, result;
	struct array *tmp;

	/*
	 * Get the sfs_fs from the generic abstract fs.
	 *
	 * Note that the abstract struct fs, which is all the VFS
	 * layer knows about, is actually a member of struct sfs_fs.
	 * The pointer in the struct fs points back to the top of the
	 * struct sfs_fs - essentially the same object. This can be a
	 * little confusing at first.
	 *
	 * The following diagram may help:
	 *
	 *     struct sfs_fs        <----------------\
	 *           :                               |
	 *           :   sfs_absfs (struct fs)       |    <------\
	 *           :      :                        |           |
	 *           :      :  various members       |           |
	 *           :      :                        |           |
	 *           :      :  fs_data  -------------/           |
	 *           :      :                                 ...|...
	 *           :                                        .  VFS  .
	 *           :                                        . layer .
	 *           :   other members                        .........
	 *           :
	 *           :
	 *
	 * This construct is repeated with vnodes and devices and other
	 * similar things all over the place in OS/161, so taking the
	 * time to straighten it out in your mind is worthwhile.
	 */
	sfs = fs->fs_data;

	/*
	 * This is kind of a hack. We can't acquire vnode locks while
	 * holding sfs_vnlock, because that violates the ordering
	 * constraints (see sfs_vnode.c) - so we *copy* the array of
	 * loaded vnodes into a temporary array and sync those.
	 */
	tmp = array_create();
	if (tmp == NULL) {
		return ENOMEM;
	}

	lock_acquire(sfs->sfs_vnlock);

	/* Go over the array of loaded vnodes. */
	num = array_getnum(sfs->sfs_vnodes);
	for (i = 0; i < num; i++) {
		struct sfs_vnode *sv = array_getguy(sfs->sfs_vnodes, i);
		VOP_INCREF(&sv->sv_v);
		if (array_add(tmp, sv) != 0) {
			// XXX
			panic("sfs_sync: array_add failed\n");
		}
	}
	lock_release(sfs->sfs_vnlock);

	/* Now sync. */
	num = array_getnum(tmp);
	for (i = 0; i < num; i++) {
		struct sfs_vnode *sv = array_getguy(tmp, i);
		result = VOP_FSYNC(&sv->sv_v);
		if (result) {
			kprintf("SFS: Warning: syncing inode %d: %s\n",
				sv->sv_ino, strerror(result));
		}
		VOP_DECREF(&sv->sv_v);
	}
	array_destroy(tmp);

	lock_acquire(sfs->sfs_bitlock);

	/* If the free block map needs to be written, write it. */
	if (sfs->sfs_freemapdirty) {
		result = sfs_mapio(sfs, UIO_WRITE);
		if (result) {
			kprintf("SFS: Warning: syncing bitmap: %s\n",
				strerror(result));
		}
		else {
			/* Only clear the dirty bit if we succeeded */
			sfs->sfs_freemapdirty = 0;
		}
	}

	/* If the superblock needs to be written, write it. */
	if (sfs->sfs_superdirty) {
		result = sfs_wblock(sfs, &sfs->sfs_super, SFS_SB_LOCATION);
		if (result) {
			kprintf("SFS: Warning: syncing superblock: %s\n",
				strerror(result));
		}
		else {
			/* Only clear the dirty bit if we succeeded */
			sfs->sfs_superdirty = 0;
		}
	}

	lock_release(sfs->sfs_bitlock);

	return 0;
}
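/*
 * A minimal sketch of the embedding the diagram above describes.
 * Member names other than sfs_absfs and fs_data are illustrative, not
 * the real OS/161 declarations: the struct fs handed to the VFS layer
 * lives inside struct sfs_fs, and fs_data points back at the
 * enclosing object, which is why `sfs = fs->fs_data` recovers it.
 */
struct fs {
	void *fs_data;			/* points back to the struct sfs_fs */
	/* ... operations the VFS layer calls ... */
};

struct sfs_fs {
	struct fs sfs_absfs;		/* what the VFS layer sees */
	/* ... sfs-private members ... */
};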
int
waitpid(pid_t pid, int *status, int options, int *wait_pid)
{
	struct process *p_node;
	int invalid_status_ptr1;
	int invalid_status_ptr2;
	int result;

	invalid_status_ptr1 = 0x80000000;
	invalid_status_ptr2 = 0x40000000;

	/* check the validity of the pid and options parameters */
	if (pid > PID_MAX || pid <= 0 ||
	    pid > (array_getnum(pid_table) - 1) || options != 0) {
		return EINVAL;
	}

	if (status == NULL || status == (int *)invalid_status_ptr1 ||
	    status == (int *)invalid_status_ptr2) {
		return EFAULT;
	}

	/* the specific pid value was never allocated in our pid_table */
	if (array_getguy(pid_table, pid) == NULL) {
		kprintf("process does not exist\n");
		return EINVAL;
	}

	/* a parent process may only wait on its own child processes */
	if (((struct process *)array_getguy(pid_table, pid))->parent_pid !=
	    curthread->pid) {
		kprintf("parent process may only wait on its own child\n");
		return EINVAL;
	}

	/* get the process entry with this specific pid */
	p_node = (struct process *)array_getguy(pid_table, pid);

	/* block until the child has exited (the child V's exit_lock) */
	P(p_node->exit_lock);

	/* the child has exited: return the exit code */
	*status = p_node->exit_code;
	/* return the child process id */
	*wait_pid = pid;

	result = freePid(pid);
	if (result) {
		return result;
	}

	return 0;
}
/*
 * Called when the vnode refcount (in-memory usage count) hits zero.
 *
 * This function should try to avoid returning errors other than EBUSY.
 */
static
int
sfs_reclaim(struct vnode *v)
{
	struct sfs_vnode *sv = v->vn_data;
	struct sfs_fs *sfs = v->vn_fs->fs_data;
	int ix, i, num, result;

	lock_acquire(sfs->sfs_vnodes_lock);

	/*
	 * Make sure someone else hasn't picked up the vnode since the
	 * decision was made to reclaim it. (You must also synchronize
	 * this with sfs_loadvnode.)
	 */
	lock_acquire(v->vn_countlock);
	if (v->vn_refcount != 1) {
		/* consume the reference VOP_DECREF gave us */
		assert(v->vn_refcount > 1);
		v->vn_refcount--;

		lock_release(v->vn_countlock);
		lock_release(sfs->sfs_vnodes_lock);
		return EBUSY;
	}
	lock_release(v->vn_countlock);

	/* If there are no on-disk references to the file either, erase it. */
	if (sv->sv_i.sfi_linkcount == 0 &&
	    sv->sv_i.sfi_type == SFS_TYPE_FILE) {
		/*
		 * VOP_TRUNCATE doesn't work on directories, which is why I
		 * added the second requirement to the above if statement.
		 */
		result = VOP_TRUNCATE(&sv->sv_v, 0);
		if (result) {
			lock_release(sfs->sfs_vnodes_lock);
			return result;
		}
	}

	/* Sync the inode to disk */
	result = sfs_sync_inode(sv);
	if (result) {
		lock_release(sfs->sfs_vnodes_lock);
		return result;
	}

	/* If there are no on-disk references, discard the inode */
	if (sv->sv_i.sfi_linkcount == 0) {
		sfs_bfree(sfs, sv->sv_ino);
	}

	/* Remove the vnode structure from the table in the struct sfs_fs. */
	ix = -1;
	num = array_getnum(sfs->sfs_vnodes);
	for (i = 0; i < num; i++) {
		struct sfs_vnode *sv2 = array_getguy(sfs->sfs_vnodes, i);
		if (sv2 == sv) {
			ix = i;
			break;
		}
	}
	if (ix < 0) {
		panic("sfs: reclaim vnode %u not in vnode pool\n",
		      sv->sv_ino);
	}
	array_remove(sfs->sfs_vnodes, ix);

	VOP_KILL(&sv->sv_v);

	/* Release the storage for the vnode structure itself. */
	kfree(sv);

	lock_release(sfs->sfs_vnodes_lock);

	/* Done */
	return 0;
}
/*
 * Function to load an inode into memory as a vnode, or dig up one
 * that's already resident.
 */
static
int
sfs_loadvnode(struct sfs_fs *sfs, u_int32_t ino, int forcetype,
	      struct sfs_vnode **ret)
{
	struct sfs_vnode *sv;
	const struct vnode_ops *ops = NULL;
	int i, num;
	int result;

	lock_acquire(sfs->sfs_vnodes_lock);

	/* Look in the vnodes table */
	num = array_getnum(sfs->sfs_vnodes);

	/* Linear search. Is this too slow? You decide. */
	for (i = 0; i < num; i++) {
		sv = array_getguy(sfs->sfs_vnodes, i);

		/* Every inode in memory must be in an allocated block */
		if (!sfs_bused(sfs, sv->sv_ino)) {
			panic("sfs: Found inode %u in unallocated block\n",
			      sv->sv_ino);
		}

		if (sv->sv_ino == ino) {
			/* Found */

			/* May only be set when creating new objects */
			assert(forcetype == SFS_TYPE_INVAL);

			VOP_INCREF(&sv->sv_v);
			lock_release(sfs->sfs_vnodes_lock);
			*ret = sv;
			return 0;
		}
	}

	/* Didn't have it loaded; load it */

	sv = kmalloc(sizeof(struct sfs_vnode));
	if (sv == NULL) {
		lock_release(sfs->sfs_vnodes_lock);
		return ENOMEM;
	}

	/* Must be in an allocated block */
	if (!sfs_bused(sfs, ino)) {
		panic("sfs: Tried to load inode %u from unallocated block\n",
		      ino);
	}

	/* Read the block the inode is in */
	result = sfs_rblock(sfs, &sv->sv_i, ino);
	if (result) {
		kfree(sv);
		lock_release(sfs->sfs_vnodes_lock);
		return result;
	}

	/* Not dirty yet */
	sv->sv_dirty = 0;

	/*
	 * FORCETYPE is set if we're creating a new file, because the
	 * block on disk will have been zeroed out and thus the type
	 * recorded there will be SFS_TYPE_INVAL.
	 */
	if (forcetype != SFS_TYPE_INVAL) {
		assert(sv->sv_i.sfi_type == SFS_TYPE_INVAL);
		sv->sv_i.sfi_type = forcetype;
		sv->sv_dirty = 1;
	}

	/*
	 * Choose the function table based on the object type.
	 */
	switch (sv->sv_i.sfi_type) {
	    case SFS_TYPE_FILE:
		ops = &sfs_fileops;
		break;
	    case SFS_TYPE_DIR:
		ops = &sfs_dirops;
		break;
	    default:
		panic("sfs: loadvnode: Invalid inode type "
		      "(inode %u, type %u)\n",
		      ino, sv->sv_i.sfi_type);
	}

	/* Call the common vnode initializer */
	result = VOP_INIT(&sv->sv_v, ops, &sfs->sfs_absfs, sv);
	if (result) {
		kfree(sv);
		lock_release(sfs->sfs_vnodes_lock);
		return result;
	}

	/* Set the other fields in our vnode structure */
	sv->sv_ino = ino;

	/* Add it to our table */
	result = array_add(sfs->sfs_vnodes, sv);
	if (result) {
		VOP_KILL(&sv->sv_v);
		kfree(sv);
		lock_release(sfs->sfs_vnodes_lock);
		return result;
	}

	lock_release(sfs->sfs_vnodes_lock);

	/* Hand it back */
	*ret = sv;
	return 0;
}
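/*
 * Typical caller, sketched after the stock OS/161 sfs_getroot(): load
 * the root directory's inode (SFS_ROOT_LOCATION) with no forced type.
 * The function name carries a _sketch suffix to mark it as
 * illustrative.
 */
struct vnode *
sfs_getroot_sketch(struct fs *fs)
{
	struct sfs_fs *sfs = fs->fs_data;
	struct sfs_vnode *sv;
	int result;

	result = sfs_loadvnode(sfs, SFS_ROOT_LOCATION, SFS_TYPE_INVAL, &sv);
	if (result) {
		panic("sfs: getroot: Cannot load root vnode\n");
	}

	return &sv->sv_v;
}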
int
as_copy(struct addrspace *old, struct addrspace **ret)
{
	int spl = splhigh();
	struct addrspace *newas;
	unsigned int i;

	newas = as_create();
	if (newas == NULL) {
		splx(spl);
		return ENOMEM;
	}

	/******************** copy internal fields ***************/

	// first all regions
	for (i = 0; i < array_getnum(old->as_regions); i++) {
		struct as_region *temp = kmalloc(sizeof(struct as_region));
		*temp = *((struct as_region *)array_getguy(old->as_regions, i));
		array_add(newas->as_regions, temp);
	}

	newas->heap_start = old->heap_start;
	newas->heap_end = old->heap_end;
	newas->temp_text_permis = old->temp_text_permis;
	newas->temp_bss_permis = old->temp_bss_permis;

	// then both levels of the page table: walk every PTE of the old
	// addrspace; a valid PTE means a page, be it PRESENT or SWAPPED,
	// belongs to this addrspace and must be copied
	for (i = 0; i < FIRST_LEVEL_PT_SIZE; i++) {
		if (old->as_master_pagetable[i] != NULL) {
			struct as_pagetable *src_pt = old->as_master_pagetable[i];
			struct as_pagetable *dest_pt;
			unsigned int j;

			newas->as_master_pagetable[i] = (struct as_pagetable *)
				kmalloc(sizeof(struct as_pagetable));
			dest_pt = newas->as_master_pagetable[i];

			for (j = 0; j < SECOND_LEVEL_PT_SIZE; j++) {
				dest_pt->PTE[j] = 0;

				if (src_pt->PTE[j] & PTE_PRESENT) {
					// this source page is PRESENT in memory: allocate
					// a page for the destination addrspace, copy
					// src->dest, and update the PTE
					paddr_t src_paddr = (src_pt->PTE[j] & PAGE_FRAME);
					vaddr_t dest_vaddr = (i << 22) + (j << 12);

					// allocate while making sure both the source and
					// destination page stay in memory
					paddr_t dest_paddr =
						alloc_page_userspace_with_avoidance(
							dest_vaddr, src_paddr);

					// do the copy
					memmove((void *)PADDR_TO_KVADDR(dest_paddr),
						(const void *)PADDR_TO_KVADDR(src_paddr),
						PAGE_SIZE);

					// update the PTE of the destination pagetable
					dest_pt->PTE[j] |= dest_paddr;
					dest_pt->PTE[j] |= PTE_PRESENT;
				}
				else if (src_pt->PTE[j] & PTE_SWAPPED) {
					// this source page is SWAPPED: load it back into
					// memory first
					vaddr_t src_vaddr = (i << 22) + (j << 12);
					vaddr_t dest_vaddr = src_vaddr;
					paddr_t src_paddr =
						load_swapped_page(old, src_vaddr);

					// now allocate a user page, but be careful not to
					// evict the source page we just brought in
					paddr_t dest_paddr =
						alloc_page_userspace_with_avoidance(
							dest_vaddr, src_paddr);

					// do the copy
					memmove((void *)PADDR_TO_KVADDR(dest_paddr),
						(const void *)PADDR_TO_KVADDR(src_paddr),
						PAGE_SIZE);

					// update the PTE of the destination pagetable
					dest_pt->PTE[j] |= dest_paddr;
					dest_pt->PTE[j] |= PTE_PRESENT;
				}
				else {
					// neither PRESENT nor SWAPPED: the page does not
					// exist, so there is nothing to copy
					dest_pt->PTE[j] = 0;
				}
			}
		}
		else {
			newas->as_master_pagetable[i] = NULL;
		}
	}

	*ret = newas;
	splx(spl);
	return 0;
}
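/*
 * A hedged sketch of the virtual-address split the (i << 22) + (j << 12)
 * arithmetic above assumes: 10 bits of first-level index, 10 bits of
 * second-level index, and a 12-bit offset within a 4K page. The macro
 * names are illustrative, not from this codebase.
 */
#define FIRST_LEVEL_INDEX(va)	(((va) >> 22) & 0x3ff)	/* top 10 bits */
#define SECOND_LEVEL_INDEX(va)	(((va) >> 12) & 0x3ff)	/* middle 10 bits */
#define PAGE_OFFSET(va)		((va) & 0xfff)		/* low 12 bits */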
/*
 * ft_array_size()
 * Returns how big the array of file descriptors is. This **DOES NOT**
 * tell you how many file descriptors the thread has open.
 */
int
ft_array_size(struct filetable *ft)
{
	assert(ft != NULL);
	return array_getnum(ft->filedescriptor);
}
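/*
 * A hypothetical illustration of the distinction between the two
 * sizes: with a 4-slot descriptor array in which only slots 0 and 2
 * are open, ft_array_size() returns 4 while ft_size() returns 2. The
 * invariant below always holds.
 */
static
void
ft_sizes_sanity(struct filetable *ft)
{
	/* open descriptors can never outnumber array slots */
	assert(ft_size(ft) <= ft_array_size(ft));
}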