/**
 * chfs_remap_leb - unmap and then map a leb
 * @chmp: chfs mount structure
 *
 * This function gets an eraseblock from the erasable queue, unmaps it through
 * EBH and maps another eraseblock to the same LNR.
 * EBH will find a free eraseblock if any or will erase one if there isn't any
 * free, just dirty block.
 *
 * Locking: the caller must hold chm_lock_mountfields and chm_lock_sizes and
 * must NOT hold chm_lock_wbuf for writing (asserted below). chm_lock_sizes
 * is temporarily dropped while the pending write buffer is flushed.
 *
 * Returns zero on case of success, errorcode otherwise.
 *
 * Needs more brainstorming here.
 */
int
chfs_remap_leb(struct chfs_mount *chmp)
{
	int err;
	struct chfs_eraseblock *cheb;
	dbg("chfs_remap_leb\n");
	uint32_t dirty, unchecked, used, free, wasted;

	//dbg("chmp->chm_nr_erasable_blocks: %d\n", chmp->chm_nr_erasable_blocks);
	//dbg("ltree: %p ecl: %p\n", &chmp->chm_ebh->ltree_lock, &chmp->chm_lock_sizes);
	KASSERT(!rw_write_held(&chmp->chm_lock_wbuf));
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_sizes));

	if (!chmp->chm_nr_erasable_blocks) {
		//TODO
		/* We don't have any erasable blocks, need to check if there
		 * are blocks on erasable_pending_wbuf_queue, flush the data
		 * and then we can remap it.
		 * If there aren't any blocks on that list too, we need to GC?
		 */
		if (!TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue)) {
			cheb = TAILQ_FIRST(&chmp->chm_erasable_pending_wbuf_queue);
			TAILQ_REMOVE(&chmp->chm_erasable_pending_wbuf_queue, cheb, queue);
			if (chmp->chm_wbuf_len) {
				/* Drop the sizes lock across the flush and
				 * take it back afterwards; the flush path
				 * acquires it itself. */
				mutex_exit(&chmp->chm_lock_sizes);
				chfs_flush_pending_wbuf(chmp);
				mutex_enter(&chmp->chm_lock_sizes);
			}
			/* Buffered data is on flash now, so the block is
			 * safe to queue for erase. */
			TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue, cheb, queue);
			chmp->chm_nr_erasable_blocks++;
		} else {
			/* We can't delete any block. */
			//FIXME should we return ENOSPC?
			return ENOSPC;
		}
	}
	/* Take the first block pending erase; the counter guarantees the
	 * queue is non-empty here. */
	cheb = TAILQ_FIRST(&chmp->chm_erase_pending_queue);
	TAILQ_REMOVE(&chmp->chm_erase_pending_queue, cheb, queue);
	chmp->chm_nr_erasable_blocks--;

	/* Snapshot the per-block accounting before the block is reset, so
	 * the mount-wide counters can be adjusted by the same amounts. */
	dirty = cheb->dirty_size;
	unchecked = cheb->unchecked_size;
	used = cheb->used_size;
	free = cheb->free_size;
	wasted = cheb->wasted_size;

	// Free allocated node references for this eraseblock
	chfs_free_node_refs(cheb);

	err = chfs_unmap_leb(chmp, cheb->lnr);
	if (err)
		return err;

	err = chfs_map_leb(chmp, cheb->lnr);
	if (err)
		return err;

	// Reset state to default and change chmp sizes too
	chfs_change_size_dirty(chmp, cheb, -dirty);
	chfs_change_size_unchecked(chmp, cheb, -unchecked);
	chfs_change_size_used(chmp, cheb, -used);
	/* The freshly mapped block is entirely free space. */
	chfs_change_size_free(chmp, cheb, chmp->chm_ebh->eb_size - free);
	chfs_change_size_wasted(chmp, cheb, -wasted);

	KASSERT(cheb->dirty_size == 0);
	KASSERT(cheb->unchecked_size == 0);
	KASSERT(cheb->used_size == 0);
	KASSERT(cheb->free_size == chmp->chm_ebh->eb_size);
	KASSERT(cheb->wasted_size == 0);

	cheb->first_node = NULL;
	cheb->last_node = NULL;

	//put it to free_queue
	TAILQ_INSERT_TAIL(&chmp->chm_free_queue, cheb, queue);
	chmp->chm_nr_free_blocks++;
	dbg("remaped (free: %d, erasable: %d)\n",
	    chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks);
	KASSERT(!TAILQ_EMPTY(&chmp->chm_free_queue));

	return 0;
}
/*
 * chfs_unmount - VFS unmount hook.
 * @mp: the mount being torn down
 * @mntflags: MNT_FORCE selects a forced unmount (FORCECLOSE on vflush)
 *
 * Teardown order matters: stop the GC thread first, flush vnodes and any
 * pending write buffer, free the per-eraseblock node references, destroy the
 * vnode cache and the eraseblock handler, destroy the chfs locks, then close
 * the underlying device vnode and free the ufsmount.
 *
 * Always returns 0.
 */
static int
chfs_unmount(struct mount *mp, int mntflags)
{
	int flags = 0, i = 0;
	struct ufsmount *ump;
	struct chfs_mount *chmp;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	dbg("[START]\n");
	ump = VFSTOUFS(mp);
	chmp = ump->um_chfs;

	/* Stop GC. */
	chfs_gc_thread_stop(chmp);

	/* Flush every buffer.
	 * NOTE(review): the vflush return value is discarded, so a failure
	 * (e.g. busy vnodes without MNT_FORCE) is not propagated — confirm
	 * this is intended. */
	(void)vflush(mp, NULLVP, flags);

	/* Push any data still sitting in the write buffer to flash. */
	if (chmp->chm_wbuf_len) {
		mutex_enter(&chmp->chm_lock_mountfields);
		chfs_flush_pending_wbuf(chmp);
		mutex_exit(&chmp->chm_lock_mountfields);
	}

	/* Free node references, one pass per physical eraseblock. */
	for (i = 0; i < chmp->chm_ebh->peb_nr; i++) {
		chfs_free_node_refs(&chmp->chm_blocks[i]);
	}

	/* Destroy vnode cache hashtable. */
	chfs_vnocache_hash_destroy(chmp->chm_vnocache_hash);

	/* Close eraseblock handler. */
	ebh_close(chmp->chm_ebh);

	/* Destroy mutexes. */
	rw_destroy(&chmp->chm_lock_wbuf);
	mutex_destroy(&chmp->chm_lock_vnocache);
	mutex_destroy(&chmp->chm_lock_sizes);
	mutex_destroy(&chmp->chm_lock_mountfields);

	/* Detach from and close the underlying device vnode. */
	if (ump->um_devvp->v_type != VBAD) {
		spec_node_setmountedfs(ump->um_devvp, NULL);
	}
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	(void)VOP_CLOSE(ump->um_devvp, FREAD|FWRITE, NOCRED);
	vput(ump->um_devvp);

	mutex_destroy(&ump->um_lock);

	/* Everything done. */
	kmem_free(ump, sizeof(struct ufsmount));
	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	dbg("[END]\n");

	return (0);
}