/*
 * Load a vnode into memory, or find it if it is already resident.
 *
 * On success, stores a referenced emufs_vnode in *ret and returns 0;
 * otherwise returns an error code (ENOMEM, or whatever vnode_init or
 * vnodearray_add report).
 */
static
int
emufs_loadvnode(struct emufs_fs *ef, uint32_t handle, int isdir,
		struct emufs_vnode **ret)
{
	struct emufs_vnode *ev;
	unsigned pos, count;
	int result;

	lock_acquire(ef->ef_emu->e_lock);

	/* First, see whether this handle is already resident. */
	count = vnodearray_num(ef->ef_vnodes);
	for (pos = 0; pos < count; pos++) {
		struct vnode *v = vnodearray_get(ef->ef_vnodes, pos);
		struct emufs_vnode *candidate = v->vn_data;

		if (candidate->ev_handle != handle) {
			continue;
		}
		/* Found it; take a reference and hand it back. */
		VOP_INCREF(&candidate->ev_v);
		lock_release(ef->ef_emu->e_lock);
		*ret = candidate;
		return 0;
	}

	/* Not resident; construct a fresh vnode for it. */
	ev = kmalloc(sizeof(struct emufs_vnode));
	if (ev == NULL) {
		lock_release(ef->ef_emu->e_lock);
		return ENOMEM;
	}
	ev->ev_emu = ef->ef_emu;
	ev->ev_handle = handle;

	/* Directories and files get different op tables. */
	result = vnode_init(&ev->ev_v,
			    isdir ? &emufs_dirops : &emufs_fileops,
			    &ef->ef_fs, ev);
	if (result) {
		lock_release(ef->ef_emu->e_lock);
		kfree(ev);
		return result;
	}

	result = vnodearray_add(ef->ef_vnodes, &ev->ev_v, NULL);
	if (result) {
		/* note: vnode_cleanup undoes vnode_init - it does not kfree */
		vnode_cleanup(&ev->ev_v);
		lock_release(ef->ef_emu->e_lock);
		kfree(ev);
		return result;
	}

	lock_release(ef->ef_emu->e_lock);
	*ret = ev;
	return 0;
}
/* * Unmount code. * * VFS calls FS_SYNC on the filesystem prior to unmounting it. */ static int sfs_unmount(struct fs *fs) { struct sfs_fs *sfs = fs->fs_data; vfs_biglock_acquire(); /* Do we have any files open? If so, can't unmount. */ if (vnodearray_num(sfs->sfs_vnodes) > 0) { vfs_biglock_release(); return EBUSY; } /* We should have just had sfs_sync called. */ KASSERT(sfs->sfs_superdirty == false); KASSERT(sfs->sfs_freemapdirty == false); /* Once we start nuking stuff we can't fail. */ vnodearray_destroy(sfs->sfs_vnodes); bitmap_destroy(sfs->sfs_freemap); /* The vfs layer takes care of the device for us */ (void)sfs->sfs_device; /* Destroy the fs object */ kfree(sfs); /* nothing else to do */ vfs_biglock_release(); return 0; }
/*
 * VOP_RECLAIM
 *
 * Reclaim should make an effort to returning errors other than EBUSY.
 *
 * Called when the last in-memory reference to the vnode goes away:
 * closes the underlying emu handle, removes the vnode from the fs's
 * vnode table, and frees the structure. Returns EBUSY if the vnode
 * was re-referenced before we got the locks.
 */
static
int
emufs_reclaim(struct vnode *v)
{
	struct emufs_vnode *ev = v->vn_data;
	struct emufs_fs *ef = v->vn_fs->fs_data;
	unsigned ix, i, num;
	int result;

	/*
	 * Need both of these locks, e_lock to protect the device
	 * and vfs_biglock to protect the fs-related material.
	 */
	vfs_biglock_acquire();
	lock_acquire(ef->ef_emu->e_lock);

	/*
	 * Someone else may have picked up another reference between
	 * the decision to reclaim and here; if so, back out.
	 */
	if (ev->ev_v.vn_refcount != 1) {
		lock_release(ef->ef_emu->e_lock);
		vfs_biglock_release();
		return EBUSY;
	}

	/* emu_close retries on I/O error */
	result = emu_close(ev->ev_emu, ev->ev_handle);
	if (result) {
		/* Device-level close failed; leave the vnode intact. */
		lock_release(ef->ef_emu->e_lock);
		vfs_biglock_release();
		return result;
	}

	/* Find this vnode's slot in the table (linear search). */
	num = vnodearray_num(ef->ef_vnodes);
	ix = num;	/* sentinel: "not found" */
	for (i=0; i<num; i++) {
		struct vnode *vx;

		vx = vnodearray_get(ef->ef_vnodes, i);
		if (vx == v) {
			ix = i;
			break;
		}
	}
	if (ix == num) {
		/* Every live vnode must be in the pool; bail hard. */
		panic("emu%d: reclaim vnode %u not in vnode pool\n",
		      ef->ef_emu->e_unit, ev->ev_handle);
	}

	vnodearray_remove(ef->ef_vnodes, ix);
	VOP_CLEANUP(&ev->ev_v);

	/* Release in reverse order of acquisition. */
	lock_release(ef->ef_emu->e_lock);
	vfs_biglock_release();

	kfree(ev);
	return 0;
}
/*
 * Reclaim - drop a vnode that's no longer in use.
 *
 * Removes the vnode from the semfs vnode table and destroys it. If
 * the underlying semaphore is no longer linked in any directory, the
 * semaphore object itself is destroyed as well. Returns EBUSY if the
 * vnode picked up another reference before we got here.
 */
static
int
semfs_reclaim(struct vnode *vn)
{
	struct semfs_vnode *semv = vn->vn_data;
	struct semfs *semfs = semv->semv_semfs;
	struct vnode *vn2;
	struct semfs_sem *sem;
	unsigned i, num;

	lock_acquire(semfs->semfs_tablelock);

	/* vnode refcount is protected by the vnode's ->vn_countlock */
	spinlock_acquire(&vn->vn_countlock);
	if (vn->vn_refcount > 1) {
		/* consume the reference VOP_DECREF passed us */
		vn->vn_refcount--;

		spinlock_release(&vn->vn_countlock);
		lock_release(semfs->semfs_tablelock);
		return EBUSY;
	}
	spinlock_release(&vn->vn_countlock);

	/* remove from the table */
	num = vnodearray_num(semfs->semfs_vnodes);
	for (i=0; i<num; i++) {
		vn2 = vnodearray_get(semfs->semfs_vnodes, i);
		if (vn2 == vn) {
			vnodearray_remove(semfs->semfs_vnodes, i);
			break;
		}
	}
	/* assume it was in the table */

	if (semv->semv_semnum != SEMFS_ROOTDIR) {
		/*
		 * This vnode names a semaphore; clear its hasvnode flag,
		 * and if it's also been unlinked, destroy the semaphore.
		 */
		sem = semfs_semarray_get(semfs->semfs_sems,
					 semv->semv_semnum);
		KASSERT(sem->sems_hasvnode);
		sem->sems_hasvnode = false;
		if (sem->sems_linked == false) {
			semfs_semarray_set(semfs->semfs_sems,
					   semv->semv_semnum, NULL);
			semfs_sem_destroy(sem);
		}
	}

	/* done with the table */
	lock_release(semfs->semfs_tablelock);

	/* destroy it */
	semfs_vnode_destroy(semv);
	return 0;
}
/* * Look up the vnode for a semaphore by number; if it doesn't exist, * create it. */ int semfs_getvnode(struct semfs *semfs, unsigned semnum, struct vnode **ret) { struct vnode *vn; struct semfs_vnode *semv; struct semfs_sem *sem; unsigned i, num; int result; /* Lock the vnode table */ lock_acquire(semfs->semfs_tablelock); /* Look for it */ num = vnodearray_num(semfs->semfs_vnodes); for (i=0; i<num; i++) { vn = vnodearray_get(semfs->semfs_vnodes, i); semv = vn->vn_data; if (semv->semv_semnum == semnum) { VOP_INCREF(vn); lock_release(semfs->semfs_tablelock); *ret = vn; return 0; } } /* Make it */ semv = semfs_vnode_create(semfs, semnum); if (semv == NULL) { lock_release(semfs->semfs_tablelock); return ENOMEM; } result = vnodearray_add(semfs->semfs_vnodes, &semv->semv_absvn, NULL); if (result) { semfs_vnode_destroy(semv); lock_release(semfs->semfs_tablelock); return ENOMEM; } if (semnum != SEMFS_ROOTDIR) { sem = semfs_semarray_get(semfs->semfs_sems, semnum); KASSERT(sem != NULL); KASSERT(sem->sems_hasvnode == false); sem->sems_hasvnode = true; } lock_release(semfs->semfs_tablelock); *ret = &semv->semv_absvn; return 0; }
static int sfs_sync(struct fs *fs) { struct sfs_fs *sfs; unsigned i, num; int result; vfs_biglock_acquire(); /* * Get the sfs_fs from the generic abstract fs. * * Note that the abstract struct fs, which is all the VFS * layer knows about, is actually a member of struct sfs_fs. * The pointer in the struct fs points back to the top of the * struct sfs_fs - essentially the same object. This can be a * little confusing at first. * * The following diagram may help: * * struct sfs_fs <-------------\ * : | * : sfs_absfs (struct fs) | <------\ * : : | | * : : various members | | * : : | | * : : fs_data ----------/ | * : : ...|... * : . VFS . * : . layer . * : other members ....... * : * : * * This construct is repeated with vnodes and devices and other * similar things all over the place in OS/161, so taking the * time to straighten it out in your mind is worthwhile. */ sfs = fs->fs_data; /* Go over the array of loaded vnodes, syncing as we go. */ num = vnodearray_num(sfs->sfs_vnodes); for (i=0; i<num; i++) { struct vnode *v = vnodearray_get(sfs->sfs_vnodes, i); VOP_FSYNC(v); } /* If the free block map needs to be written, write it. */ if (sfs->sfs_freemapdirty) { result = sfs_mapio(sfs, UIO_WRITE); if (result) { vfs_biglock_release(); return result; } sfs->sfs_freemapdirty = false; } /* If the superblock needs to be written, write it. */ if (sfs->sfs_superdirty) { result = sfs_wblock(sfs, &sfs->sfs_super, SFS_SB_LOCATION); if (result) { vfs_biglock_release(); return result; } sfs->sfs_superdirty = false; } vfs_biglock_release(); return 0; }
/*
 * Called when the vnode refcount (in-memory usage count) hits zero.
 *
 * This function should try to avoid returning errors other than EBUSY.
 *
 * If the file also has no on-disk links, its storage is truncated and
 * the inode freed; otherwise the inode is just synced to disk. In
 * either case the vnode is removed from the fs's vnode table and its
 * memory released.
 */
static
int
sfs_reclaim(struct vnode *v)
{
	struct sfs_vnode *sv = v->vn_data;
	struct sfs_fs *sfs = v->vn_fs->fs_data;
	unsigned ix, i, num;
	int result;

	vfs_biglock_acquire();

	/*
	 * Make sure someone else hasn't picked up the vnode since the
	 * decision was made to reclaim it. (You must also synchronize
	 * this with sfs_loadvnode.)
	 */
	if (v->vn_refcount != 1) {
		/* consume the reference VOP_DECREF gave us */
		KASSERT(v->vn_refcount>1);

		v->vn_refcount--;
		vfs_biglock_release();
		return EBUSY;
	}

	/* If there are no on-disk references to the file either, erase it. */
	if (sv->sv_i.sfi_linkcount==0) {
		/* Truncate to zero length to release all data blocks. */
		result = VOP_TRUNCATE(&sv->sv_v, 0);
		if (result) {
			vfs_biglock_release();
			return result;
		}
	}

	/* Sync the inode to disk */
	result = sfs_sync_inode(sv);
	if (result) {
		vfs_biglock_release();
		return result;
	}

	/* If there are no on-disk references, discard the inode */
	if (sv->sv_i.sfi_linkcount==0) {
		sfs_bfree(sfs, sv->sv_ino);
	}

	/* Remove the vnode structure from the table in the struct sfs_fs. */
	num = vnodearray_num(sfs->sfs_vnodes);
	ix = num;	/* sentinel: "not found" */
	for (i=0; i<num; i++) {
		struct vnode *v2 = vnodearray_get(sfs->sfs_vnodes, i);
		struct sfs_vnode *sv2 = v2->vn_data;
		if (sv2 == sv) {
			ix = i;
			break;
		}
	}
	if (ix == num) {
		/* Every live vnode must be in the pool; bail hard. */
		panic("sfs: reclaim vnode %u not in vnode pool\n",
		      sv->sv_ino);
	}
	vnodearray_remove(sfs->sfs_vnodes, ix);

	VOP_CLEANUP(&sv->sv_v);

	vfs_biglock_release();

	/* Release the storage for the vnode structure itself. */
	kfree(sv);

	/* Done */
	return 0;
}
/*
 * Function to load a inode into memory as a vnode, or dig up one
 * that's already resident.
 *
 * FORCETYPE is SFS_TYPE_INVAL except when creating a new object,
 * in which case it supplies the type for the freshly-zeroed on-disk
 * inode. On success, stores a referenced sfs_vnode in *ret.
 */
static
int
sfs_loadvnode(struct sfs_fs *sfs, uint32_t ino, int forcetype,
	      struct sfs_vnode **ret)
{
	struct vnode *v;
	struct sfs_vnode *sv;
	const struct vnode_ops *ops = NULL;
	unsigned i, num;
	int result;

	/* Look in the vnodes table */
	num = vnodearray_num(sfs->sfs_vnodes);

	/* Linear search. Is this too slow? You decide. */
	for (i=0; i<num; i++) {
		v = vnodearray_get(sfs->sfs_vnodes, i);
		sv = v->vn_data;

		/* Every inode in memory must be in an allocated block */
		if (!sfs_bused(sfs, sv->sv_ino)) {
			panic("sfs: Found inode %u in unallocated block\n",
			      sv->sv_ino);
		}

		if (sv->sv_ino==ino) {
			/* Found */

			/* May only be set when creating new objects */
			KASSERT(forcetype==SFS_TYPE_INVAL);
			VOP_INCREF(&sv->sv_v);

			*ret = sv;
			return 0;
		}
	}

	/* Didn't have it loaded; load it */

	sv = kmalloc(sizeof(struct sfs_vnode));
	if (sv==NULL) {
		return ENOMEM;
	}

	/* Must be in an allocated block */
	if (!sfs_bused(sfs, ino)) {
		panic("sfs: Tried to load inode %u from unallocated block\n",
		      ino);
	}

	/* Read the block the inode is in */
	result = sfs_rblock(sfs, &sv->sv_i, ino);
	if (result) {
		kfree(sv);
		return result;
	}

	/* Not dirty yet */
	sv->sv_dirty = false;

	/*
	 * FORCETYPE is set if we're creating a new file, because the
	 * block on disk will have been zeroed out and thus the type
	 * recorded there will be SFS_TYPE_INVAL.
	 */
	if (forcetype != SFS_TYPE_INVAL) {
		KASSERT(sv->sv_i.sfi_type == SFS_TYPE_INVAL);
		sv->sv_i.sfi_type = forcetype;
		/* The in-memory inode now differs from disk. */
		sv->sv_dirty = true;
	}

	/*
	 * Choose the function table based on the object type.
	 */
	switch (sv->sv_i.sfi_type) {
	    case SFS_TYPE_FILE:
		ops = &sfs_fileops;
		break;
	    case SFS_TYPE_DIR:
		ops = &sfs_dirops;
		break;
	    default:
		panic("sfs: loadvnode: Invalid inode type "
		      "(inode %u, type %u)\n",
		      ino, sv->sv_i.sfi_type);
	}

	/* Call the common vnode initializer */
	result = VOP_INIT(&sv->sv_v, ops, &sfs->sfs_absfs, sv);
	if (result) {
		kfree(sv);
		return result;
	}

	/* Set the other fields in our vnode structure */
	sv->sv_ino = ino;

	/* Add it to our table */
	result = vnodearray_add(sfs->sfs_vnodes, &sv->sv_v, NULL);
	if (result) {
		/* Undo VOP_INIT before freeing. */
		VOP_CLEANUP(&sv->sv_v);
		kfree(sv);
		return result;
	}

	/* Hand it back */
	*ret = sv;
	return 0;
}
/*
 * VOP_RECLAIM
 *
 * Reclaim should make an effort to returning errors other than EBUSY.
 *
 * Variant that uses the vnode's vn_countlock (rather than vfs_biglock)
 * to make the refcount check safe. Closes the emu handle, removes the
 * vnode from the fs's table, and frees it. Returns EBUSY if the vnode
 * was re-referenced before we got the locks.
 */
static
int
emufs_reclaim(struct vnode *v)
{
	struct emufs_vnode *ev = v->vn_data;
	struct emufs_fs *ef = v->vn_fs->fs_data;
	unsigned ix, i, num;
	int result;

	/*
	 * Need both of these locks: e_lock to protect the device,
	 * and vn_countlock for the reference count.
	 */

	lock_acquire(ef->ef_emu->e_lock);
	spinlock_acquire(&ev->ev_v.vn_countlock);

	if (ev->ev_v.vn_refcount > 1) {
		/* consume the reference VOP_DECREF passed us */
		ev->ev_v.vn_refcount--;

		spinlock_release(&ev->ev_v.vn_countlock);
		lock_release(ef->ef_emu->e_lock);
		return EBUSY;
	}
	KASSERT(ev->ev_v.vn_refcount == 1);

	/*
	 * Since we hold e_lock and are the last ref, nobody can increment
	 * the refcount, so we can release vn_countlock.
	 */
	spinlock_release(&ev->ev_v.vn_countlock);

	/* emu_close retries on I/O error */
	result = emu_close(ev->ev_emu, ev->ev_handle);
	if (result) {
		/* Device-level close failed; leave the vnode intact. */
		lock_release(ef->ef_emu->e_lock);
		return result;
	}

	/* Find this vnode's slot in the table (linear search). */
	num = vnodearray_num(ef->ef_vnodes);
	ix = num;	/* sentinel: "not found" */
	for (i=0; i<num; i++) {
		struct vnode *vx;

		vx = vnodearray_get(ef->ef_vnodes, i);
		if (vx == v) {
			ix = i;
			break;
		}
	}
	if (ix == num) {
		/* Every live vnode must be in the pool; bail hard. */
		panic("emu%d: reclaim vnode %u not in vnode pool\n",
		      ef->ef_emu->e_unit, ev->ev_handle);
	}

	vnodearray_remove(ef->ef_vnodes, ix);
	vnode_cleanup(&ev->ev_v);

	lock_release(ef->ef_emu->e_lock);

	kfree(ev);
	return 0;
}