/*
 * Find the specified name which should have the specified offset.
 * Returns a slot number, and panics on failure.
 *
 * `dh' must be locked on entry and remains so on return.
 */
int
ufsdirhash_findslot(struct dirhash *dh, char *name, int namelen, doff_t offset)
{
	int slot;

	mtx_assert(&dh->dh_mtx, MA_OWNED);

	/* Find the entry. */
	DIRHASH_ASSERT(dh->dh_hused < dh->dh_hlen, ("dirhash find full"));
	slot = ufsdirhash_hash(dh, name, namelen);
	while (DH_ENTRY(dh, slot) != offset &&
	    DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
		slot = WRAPINCR(slot, dh->dh_hlen);
	if (DH_ENTRY(dh, slot) != offset)
		panic("ufsdirhash_findslot: '%.*s' not found", namelen, name);

	return (slot);
}
/*
 * Find the specified name which should have the specified offset.
 * Returns a slot number, and panics on failure.
 *
 * `dh' must be locked on entry and remains so on return.
 */
static int
ufsdirhash_findslot(struct dirhash *dh, const char *name, int namelen,
    doff_t offset)
{
	int slot;

	KASSERT(mutex_owned(&dh->dh_lock));

	/* Find the entry. */
	KASSERT(dh->dh_hused < dh->dh_hlen);
	slot = ufsdirhash_hash(dh, name, namelen);
	while (DH_ENTRY(dh, slot) != offset &&
	    DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
		slot = WRAPINCR(slot, dh->dh_hlen);
	if (DH_ENTRY(dh, slot) != offset)
		panic("ufsdirhash_findslot: '%.*s' not found", namelen, name);

	return (slot);
}
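/*
 * Both variants above probe with open addressing: a linear scan over a
 * circular array of dh_hlen slots. A rough sketch of the wrap-around
 * helpers they assume (illustrative; the exact definitions live in the
 * dirhash header):
 *
 *	#define WRAPINCR(val, num)	(((val) + 1 == (num)) ? 0 : ((val) + 1))
 *	#define WRAPDECR(val, num)	(((val) == 0) ? ((num) - 1) : ((val) - 1))
 *
 * The scan cannot spin forever because the callers assert
 * dh_hused < dh_hlen, guaranteeing at least one DIRHASH_EMPTY slot.
 */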
/*
 * Insert information into the hash about a new directory entry. dirp
 * points to a struct direct containing the entry, and offset specifies
 * the offset of this entry.
 */
void
ufsdirhash_add(struct inode *ip, struct direct *dirp, doff_t offset)
{
	struct dirhash *dh;
	int slot;
	const int needswap = UFS_MPNEEDSWAP(ip->i_ump);
	int dirblksiz = ip->i_ump->um_dirblksiz;

	if ((dh = ip->i_dirhash) == NULL)
		return;
	DIRHASH_LOCK(dh);
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ufsdirhash_free(ip);
		return;
	}

	KASSERT(offset < dh->dh_dirblks * dirblksiz);
	/*
	 * Normal hash usage is < 66%. If the usage gets too high then
	 * remove the hash entirely and let it be rebuilt later.
	 */
	if (dh->dh_hused >= (dh->dh_hlen * 3) / 4) {
		DIRHASH_UNLOCK(dh);
		ufsdirhash_free(ip);
		return;
	}

	/* Find a free hash slot (empty or deleted), and add the entry. */
	slot = ufsdirhash_hash(dh, dirp->d_name, dirp->d_namlen);
	while (DH_ENTRY(dh, slot) >= 0)
		slot = WRAPINCR(slot, dh->dh_hlen);
	if (DH_ENTRY(dh, slot) == DIRHASH_EMPTY)
		dh->dh_hused++;
	DH_ENTRY(dh, slot) = offset;

	/* Update the per-block summary info. */
	ufsdirhash_adjfree(dh, offset, -UFS_DIRSIZ(0, dirp, needswap),
	    dirblksiz);
	DIRHASH_UNLOCK(dh);
}
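/*
 * Worked example of the load-factor guard above (illustrative numbers):
 * with dh_hlen == 4096, the hash is torn down once dh_hused reaches
 * 3072 (75%). ufsdirhash_build() sizes the table with 50% more slots
 * than the directory could ever hold, so a healthy hash stays below
 * ~66% usage; crossing 75% means the directory has outgrown the table
 * and a rebuild is cheaper than increasingly degraded probing.
 */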
/*
 * Remove the entry corresponding to the specified slot from the hash array.
 *
 * `dh' must be locked on entry and remains so on return.
 */
void
ufsdirhash_delslot(struct dirhash *dh, int slot)
{
	int i;

	mtx_assert(&dh->dh_mtx, MA_OWNED);

	/* Mark the entry as deleted. */
	DH_ENTRY(dh, slot) = DIRHASH_DEL;

	/* If this is the end of a chain of DIRHASH_DEL slots, remove them. */
	for (i = slot; DH_ENTRY(dh, i) == DIRHASH_DEL; )
		i = WRAPINCR(i, dh->dh_hlen);
	if (DH_ENTRY(dh, i) == DIRHASH_EMPTY) {
		i = WRAPDECR(i, dh->dh_hlen);
		while (DH_ENTRY(dh, i) == DIRHASH_DEL) {
			DH_ENTRY(dh, i) = DIRHASH_EMPTY;
			dh->dh_hused--;
			i = WRAPDECR(i, dh->dh_hlen);
		}
		DIRHASH_ASSERT(dh->dh_hused >= 0,
		    ("ufsdirhash_delslot neg hlen"));
	}
}
/*
 * Remove the entry corresponding to the specified slot from the hash array.
 *
 * `dh' must be locked on entry and remains so on return.
 */
static void
ufsdirhash_delslot(struct dirhash *dh, int slot)
{
	int i;

	KASSERT(mutex_owned(&dh->dh_lock));

	/* Mark the entry as deleted. */
	DH_ENTRY(dh, slot) = DIRHASH_DEL;

	/* If this is the end of a chain of DIRHASH_DEL slots, remove them. */
	for (i = slot; DH_ENTRY(dh, i) == DIRHASH_DEL; )
		i = WRAPINCR(i, dh->dh_hlen);
	if (DH_ENTRY(dh, i) == DIRHASH_EMPTY) {
		i = WRAPDECR(i, dh->dh_hlen);
		while (DH_ENTRY(dh, i) == DIRHASH_DEL) {
			DH_ENTRY(dh, i) = DIRHASH_EMPTY;
			dh->dh_hused--;
			i = WRAPDECR(i, dh->dh_hlen);
		}
		KASSERT(dh->dh_hused >= 0);
	}
}
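/*
 * Tombstone cleanup in the two variants above, sketched: suppose the
 * table contains ... [S] [D] [D] [E] ... and slot S has just been
 * marked DIRHASH_DEL. The forward scan from S skips the run of DELs and
 * lands on the EMPTY slot, which proves no probe chain continues past
 * the run; the backward walk can therefore convert every trailing DEL
 * (including S) back to DIRHASH_EMPTY and release its dh_hused count.
 * If the forward scan instead hits a live offset, the DELs must stay,
 * since a lookup probing through them may still need to reach it.
 */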
/*
 * Change the offset associated with a directory entry in the hash. Used
 * when compacting directory blocks.
 */
void
ufsdirhash_move(struct inode *ip, struct direct *dirp, doff_t oldoff,
    doff_t newoff)
{
	struct dirhash *dh;
	int slot;

	if ((dh = ip->i_dirhash) == NULL)
		return;
	DIRHASH_LOCK(dh);
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ufsdirhash_free(ip);
		return;
	}

	KASSERT(oldoff < dh->dh_dirblks * ip->i_ump->um_dirblksiz &&
	    newoff < dh->dh_dirblks * ip->i_ump->um_dirblksiz);
	/* Find the entry, and update the offset. */
	slot = ufsdirhash_findslot(dh, dirp->d_name, dirp->d_namlen, oldoff);
	DH_ENTRY(dh, slot) = newoff;
	DIRHASH_UNLOCK(dh);
}
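/*
 * Note that the findslot() call above panics rather than failing
 * gracefully: the entry being relocated was just read from the
 * directory itself, so its absence from the hash would mean the hash
 * and the on-disk directory have gone out of sync.
 */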
/*
 * Find the offset of the specified name within the given inode.
 * Returns 0 on success, ENOENT if the entry does not exist, or
 * EJUSTRETURN if the caller should revert to a linear search.
 *
 * If successful, the directory offset is stored in *offp, and a
 * pointer to a struct buf containing the entry is stored in *bpp. If
 * prevoffp is non-NULL, the offset of the previous entry within
 * the DIRBLKSIZ-sized block is stored in *prevoffp (if the entry
 * is the first in a block, the start of the block is used).
 */
int
ufsdirhash_lookup(struct inode *ip, char *name, int namelen, doff_t *offp,
    struct buf **bpp, doff_t *prevoffp)
{
	struct dirhash *dh, *dh_next;
	struct direct *dp;
	struct vnode *vp;
	struct buf *bp;
	doff_t blkoff, bmask, offset, prevoff;
	int i, slot;

	if ((dh = ip->i_dirhash) == NULL)
		return (EJUSTRETURN);
	/*
	 * Move this dirhash towards the end of the list if it has a
	 * score higher than the next entry, and acquire the dh_mtx.
	 * Optimise the case where it's already the last by performing
	 * an unlocked read of the TAILQ_NEXT pointer.
	 *
	 * In both cases, end up holding just dh_mtx.
	 */
	if (TAILQ_NEXT(dh, dh_list) != NULL) {
		DIRHASHLIST_LOCK();
		DIRHASH_LOCK(dh);
		/*
		 * If the new score will be greater than that of the next
		 * entry, then move this entry past it. With both mutexes
		 * held, dh_next won't go away, but its dh_score could
		 * change; that's not important since it is just a hint.
		 */
		if (dh->dh_hash != NULL &&
		    (dh_next = TAILQ_NEXT(dh, dh_list)) != NULL &&
		    dh->dh_score >= dh_next->dh_score) {
			DIRHASH_ASSERT(dh->dh_onlist, ("dirhash: not on list"));
			TAILQ_REMOVE(&ufsdirhash_list, dh, dh_list);
			TAILQ_INSERT_AFTER(&ufsdirhash_list, dh_next, dh,
			    dh_list);
		}
		DIRHASHLIST_UNLOCK();
	} else {
		/* Already the last, though that could change as we wait. */
		DIRHASH_LOCK(dh);
	}
	if (dh->dh_hash == NULL) {
		DIRHASH_UNLOCK(dh);
		ufsdirhash_free(ip);
		return (EJUSTRETURN);
	}

	/* Update the score. */
	if (dh->dh_score < DH_SCOREMAX)
		dh->dh_score++;

	vp = ip->i_vnode;
	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
	blkoff = -1;
	bp = NULL;
restart:
	slot = ufsdirhash_hash(dh, name, namelen);

	if (dh->dh_seqopt) {
		/*
		 * Sequential access optimisation. dh_seqoff contains the
		 * offset of the directory entry immediately following
		 * the last entry that was looked up. Check if this offset
		 * appears in the hash chain for the name we are looking for.
		 */
		for (i = slot; (offset = DH_ENTRY(dh, i)) != DIRHASH_EMPTY;
		    i = WRAPINCR(i, dh->dh_hlen))
			if (offset == dh->dh_seqoff)
				break;
		if (offset == dh->dh_seqoff) {
			/*
			 * We found an entry with the expected offset. This
			 * is probably the entry we want, but if not, the
			 * code below will turn off seqopt and retry.
			 */
			slot = i;
		} else
			dh->dh_seqopt = 0;
	}

	for (; (offset = DH_ENTRY(dh, slot)) != DIRHASH_EMPTY;
	    slot = WRAPINCR(slot, dh->dh_hlen)) {
		if (offset == DIRHASH_DEL)
			continue;
		DIRHASH_UNLOCK(dh);

		if (offset < 0 || offset >= DIP(ip, size))
			panic("ufsdirhash_lookup: bad offset in hash array");
		if ((offset & ~bmask) != blkoff) {
			if (bp != NULL)
				brelse(bp);
			blkoff = offset & ~bmask;
			if (UFS_BUFATOFF(ip, (off_t)blkoff, NULL, &bp) != 0)
				return (EJUSTRETURN);
		}
		dp = (struct direct *)(bp->b_data + (offset & bmask));
		if (dp->d_reclen == 0 || dp->d_reclen >
		    DIRBLKSIZ - (offset & (DIRBLKSIZ - 1))) {
			/* Corrupted directory. */
			brelse(bp);
			return (EJUSTRETURN);
		}
		if (dp->d_namlen == namelen &&
		    memcmp(dp->d_name, name, namelen) == 0) {
			/* Found. Get the prev offset if needed. */
			if (prevoffp != NULL) {
				if (offset & (DIRBLKSIZ - 1)) {
					prevoff = ufsdirhash_getprev(dp,
					    offset);
					if (prevoff == -1) {
						brelse(bp);
						return (EJUSTRETURN);
					}
				} else
					prevoff = offset;
				*prevoffp = prevoff;
			}

			/* Check for sequential access, and update offset. */
			if (dh->dh_seqopt == 0 && dh->dh_seqoff == offset)
				dh->dh_seqopt = 1;
			dh->dh_seqoff = offset + DIRSIZ(0, dp);

			*bpp = bp;
			*offp = offset;
			return (0);
		}

		DIRHASH_LOCK(dh);
		if (dh->dh_hash == NULL) {
			DIRHASH_UNLOCK(dh);
			if (bp != NULL)
				brelse(bp);
			ufsdirhash_free(ip);
			return (EJUSTRETURN);
		}
		/*
		 * When the name doesn't match in the seqopt case, go back
		 * and search normally.
		 */
		if (dh->dh_seqopt) {
			dh->dh_seqopt = 0;
			goto restart;
		}
	}
	DIRHASH_UNLOCK(dh);
	if (bp != NULL)
		brelse(bp);
	return (ENOENT);
}
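/*
 * Example of the sequential optimisation above (a sketch of the common
 * case, not a guarantee): a workload that stats every name returned by
 * readdir looks entries up in directory order. After each hit,
 * dh_seqoff records the offset just past the found entry; if the next
 * lookup finds exactly that offset in its probe chain, it is very
 * likely the right entry, so the scan starts there instead of at the
 * head of the chain. A mismatch simply clears dh_seqopt and restarts
 * the normal search.
 */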
/*
 * Attempt to build up a hash table for the directory contents in
 * inode 'ip'. Returns 0 on success, or -1 if the operation failed.
 */
int
ufsdirhash_build(struct inode *ip)
{
	struct dirhash *dh;
	struct buf *bp = NULL;
	struct direct *ep;
	struct vnode *vp;
	doff_t bmask, pos;
	int dirblocks, i, j, memreqd, nblocks, narrays, nslots, slot;

	/* Check if we can/should use dirhash. */
	if (ip->i_dirhash == NULL) {
		if (DIP(ip, size) < ufs_mindirhashsize || OFSFMT(ip))
			return (-1);
	} else {
		/* Hash exists, but sysctls could have changed. */
		if (DIP(ip, size) < ufs_mindirhashsize ||
		    ufs_dirhashmem > ufs_dirhashmaxmem) {
			ufsdirhash_free(ip);
			return (-1);
		}
		/* Check if hash exists and is intact (note: unlocked read). */
		if (ip->i_dirhash->dh_hash != NULL)
			return (0);
		/* Free the old, recycled hash and build a new one. */
		ufsdirhash_free(ip);
	}

	/* Don't hash removed directories. */
	if (ip->i_effnlink == 0)
		return (-1);

	vp = ip->i_vnode;
	/* Allocate 50% more entries than this dir size could ever need. */
	DIRHASH_ASSERT(DIP(ip, size) >= DIRBLKSIZ, ("ufsdirhash_build size"));
	nslots = DIP(ip, size) / DIRECTSIZ(1);
	nslots = (nslots * 3 + 1) / 2;
	narrays = howmany(nslots, DH_NBLKOFF);
	nslots = narrays * DH_NBLKOFF;
	dirblocks = howmany(DIP(ip, size), DIRBLKSIZ);
	nblocks = (dirblocks * 3 + 1) / 2;

	memreqd = sizeof(*dh) + narrays * sizeof(*dh->dh_hash) +
	    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
	    nblocks * sizeof(*dh->dh_blkfree);
	DIRHASHLIST_LOCK();
	if (memreqd + ufs_dirhashmem > ufs_dirhashmaxmem) {
		DIRHASHLIST_UNLOCK();
		if (memreqd > ufs_dirhashmaxmem / 2)
			return (-1);

		/* Try to free some space. */
		if (ufsdirhash_recycle(memreqd) != 0)
			return (-1);
		/* Enough was freed, and list has been locked. */
	}
	ufs_dirhashmem += memreqd;
	DIRHASHLIST_UNLOCK();

	/*
	 * Use non-blocking mallocs so that we will revert to a linear
	 * lookup on failure rather than potentially blocking forever.
	 */
	dh = malloc(sizeof(*dh), M_DIRHASH, M_NOWAIT | M_ZERO);
	if (dh == NULL) {
		DIRHASHLIST_LOCK();
		ufs_dirhashmem -= memreqd;
		DIRHASHLIST_UNLOCK();
		return (-1);
	}
	dh->dh_hash = mallocarray(narrays, sizeof(dh->dh_hash[0]),
	    M_DIRHASH, M_NOWAIT | M_ZERO);
	dh->dh_blkfree = mallocarray(nblocks, sizeof(dh->dh_blkfree[0]),
	    M_DIRHASH, M_NOWAIT | M_ZERO);
	if (dh->dh_hash == NULL || dh->dh_blkfree == NULL)
		goto fail;
	for (i = 0; i < narrays; i++) {
		if ((dh->dh_hash[i] = DIRHASH_BLKALLOC()) == NULL)
			goto fail;
		for (j = 0; j < DH_NBLKOFF; j++)
			dh->dh_hash[i][j] = DIRHASH_EMPTY;
	}

	/* Initialise the hash table and block statistics. */
	mtx_init(&dh->dh_mtx, IPL_NONE);
	dh->dh_narrays = narrays;
	dh->dh_hlen = nslots;
	dh->dh_nblk = nblocks;
	dh->dh_dirblks = dirblocks;
	for (i = 0; i < dirblocks; i++)
		dh->dh_blkfree[i] = DIRBLKSIZ / DIRALIGN;
	for (i = 0; i < DH_NFSTATS; i++)
		dh->dh_firstfree[i] = -1;
	dh->dh_firstfree[DH_NFSTATS] = 0;
	dh->dh_seqopt = 0;
	dh->dh_seqoff = 0;
	dh->dh_score = DH_SCOREINIT;
	ip->i_dirhash = dh;

	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
	pos = 0;
	while (pos < DIP(ip, size)) {
		/* If necessary, get the next directory block. */
		if ((pos & bmask) == 0) {
			if (bp != NULL)
				brelse(bp);
			if (UFS_BUFATOFF(ip, (off_t)pos, NULL, &bp) != 0)
				goto fail;
		}

		/* Add this entry to the hash. */
		ep = (struct direct *)((char *)bp->b_data + (pos & bmask));
		if (ep->d_reclen == 0 || ep->d_reclen >
		    DIRBLKSIZ - (pos & (DIRBLKSIZ - 1))) {
			/* Corrupted directory. */
			brelse(bp);
			goto fail;
		}
		if (ep->d_ino != 0) {
			/* Add the entry (simplified ufsdirhash_add). */
			slot = ufsdirhash_hash(dh, ep->d_name, ep->d_namlen);
			while (DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
				slot = WRAPINCR(slot, dh->dh_hlen);
			dh->dh_hused++;
			DH_ENTRY(dh, slot) = pos;
			ufsdirhash_adjfree(dh, pos, -DIRSIZ(0, ep));
		}
		pos += ep->d_reclen;
	}

	if (bp != NULL)
		brelse(bp);
	DIRHASHLIST_LOCK();
	TAILQ_INSERT_TAIL(&ufsdirhash_list, dh, dh_list);
	dh->dh_onlist = 1;
	DIRHASHLIST_UNLOCK();
	return (0);

fail:
	if (dh->dh_hash != NULL) {
		for (i = 0; i < narrays; i++)
			if (dh->dh_hash[i] != NULL)
				DIRHASH_BLKFREE(dh->dh_hash[i]);
		free(dh->dh_hash, M_DIRHASH, 0);
	}
	if (dh->dh_blkfree != NULL)
		free(dh->dh_blkfree, M_DIRHASH, 0);
	free(dh, M_DIRHASH, 0);
	ip->i_dirhash = NULL;
	DIRHASHLIST_LOCK();
	ufs_dirhashmem -= memreqd;
	DIRHASHLIST_UNLOCK();
	return (-1);
}
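/*
 * Sizing sketch for the build above (illustrative arithmetic, assuming
 * DIRBLKSIZ == 512 and UFS1's DIRECTSIZ(1) == 12): a 16 KB directory
 * gives nslots = 16384 / 12 = 1365, grown by 50% to 2048 and rounded up
 * to whole DH_NBLKOFF-entry arrays; dirblocks = 32, so nblocks = 48
 * free-space summary slots. memreqd charges all of this against the
 * global ufs_dirhashmaxmem budget before any allocation is attempted.
 */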
/*
 * Attempt to build up a hash table for the directory contents in
 * inode 'ip'. Returns 0 on success, or -1 if the operation failed.
 */
int
ufsdirhash_build(struct inode *ip)
{
	struct dirhash *dh;
	struct buf *bp = NULL;
	struct direct *ep;
	struct vnode *vp;
	doff_t bmask, pos;
	int dirblocks, i, j, memreqd, nblocks, narrays, nslots, slot;
	const int needswap = UFS_MPNEEDSWAP(ip->i_ump);
	int dirblksiz = ip->i_ump->um_dirblksiz;

	/* Check if we can/should use dirhash. */
	if (ip->i_dirhash == NULL) {
		if (ip->i_size < (ufs_dirhashminblks * dirblksiz) ||
		    OFSFMT(ip))
			return (-1);
	} else {
		/* Hash exists, but sysctls could have changed. */
		if (ip->i_size < (ufs_dirhashminblks * dirblksiz) ||
		    ufs_dirhashmem > ufs_dirhashmaxmem) {
			ufsdirhash_free(ip);
			return (-1);
		}
		/* Check if hash exists and is intact (note: unlocked read). */
		if (ip->i_dirhash->dh_hash != NULL)
			return (0);
		/* Free the old, recycled hash and build a new one. */
		ufsdirhash_free(ip);
	}

	/* Don't hash removed directories. */
	if (ip->i_nlink == 0)
		return (-1);

	vp = ip->i_vnode;
	/* Allocate 50% more entries than this dir size could ever need. */
	KASSERT(ip->i_size >= dirblksiz);
	nslots = ip->i_size / UFS_DIRECTSIZ(1);
	nslots = (nslots * 3 + 1) / 2;
	narrays = howmany(nslots, DH_NBLKOFF);
	nslots = narrays * DH_NBLKOFF;
	dirblocks = howmany(ip->i_size, dirblksiz);
	nblocks = (dirblocks * 3 + 1) / 2;

	memreqd = sizeof(*dh) + narrays * sizeof(*dh->dh_hash) +
	    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
	    nblocks * sizeof(*dh->dh_blkfree);

	while (atomic_add_int_nv(&ufs_dirhashmem, memreqd) >
	    ufs_dirhashmaxmem) {
		atomic_add_int(&ufs_dirhashmem, -memreqd);
		if (memreqd > ufs_dirhashmaxmem / 2)
			return (-1);
		/* Try to free some space. */
		if (ufsdirhash_recycle(memreqd) != 0)
			return (-1);
		else
			DIRHASHLIST_UNLOCK();
	}

	/*
	 * Use non-blocking mallocs so that we will revert to a linear
	 * lookup on failure rather than potentially blocking forever.
	 */
	dh = pool_cache_get(ufsdirhash_cache, PR_NOWAIT);
	if (dh == NULL) {
		atomic_add_int(&ufs_dirhashmem, -memreqd);
		return (-1);
	}
	memset(dh, 0, sizeof(*dh));
	mutex_init(&dh->dh_lock, MUTEX_DEFAULT, IPL_NONE);
	DIRHASH_LOCK(dh);
	dh->dh_hashsz = narrays * sizeof(dh->dh_hash[0]);
	dh->dh_hash = kmem_zalloc(dh->dh_hashsz, KM_NOSLEEP);
	dh->dh_blkfreesz = nblocks * sizeof(dh->dh_blkfree[0]);
	dh->dh_blkfree = kmem_zalloc(dh->dh_blkfreesz, KM_NOSLEEP);
	if (dh->dh_hash == NULL || dh->dh_blkfree == NULL)
		goto fail;
	for (i = 0; i < narrays; i++) {
		if ((dh->dh_hash[i] = DIRHASH_BLKALLOC()) == NULL)
			goto fail;
		for (j = 0; j < DH_NBLKOFF; j++)
			dh->dh_hash[i][j] = DIRHASH_EMPTY;
	}

	/* Initialise the hash table and block statistics. */
	dh->dh_narrays = narrays;
	dh->dh_hlen = nslots;
	dh->dh_nblk = nblocks;
	dh->dh_dirblks = dirblocks;
	for (i = 0; i < dirblocks; i++)
		dh->dh_blkfree[i] = dirblksiz / DIRALIGN;
	for (i = 0; i < DH_NFSTATS; i++)
		dh->dh_firstfree[i] = -1;
	dh->dh_firstfree[DH_NFSTATS] = 0;
	dh->dh_seqopt = 0;
	dh->dh_seqoff = 0;
	dh->dh_score = DH_SCOREINIT;
	ip->i_dirhash = dh;

	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
	pos = 0;
	while (pos < ip->i_size) {
		if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			preempt();
		}
		/* If necessary, get the next directory block. */
		if ((pos & bmask) == 0) {
			if (bp != NULL)
				brelse(bp, 0);
			if (ufs_blkatoff(vp, (off_t)pos, NULL, &bp,
			    false) != 0)
				goto fail;
		}

		/* Add this entry to the hash. */
		ep = (struct direct *)((char *)bp->b_data + (pos & bmask));
		if (ep->d_reclen == 0 || ep->d_reclen >
		    dirblksiz - (pos & (dirblksiz - 1))) {
			/* Corrupted directory. */
			brelse(bp, 0);
			goto fail;
		}
		if (ep->d_ino != 0) {
			/* Add the entry (simplified ufsdirhash_add). */
			slot = ufsdirhash_hash(dh, ep->d_name, ep->d_namlen);
			while (DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
				slot = WRAPINCR(slot, dh->dh_hlen);
			dh->dh_hused++;
			DH_ENTRY(dh, slot) = pos;
			ufsdirhash_adjfree(dh, pos,
			    -UFS_DIRSIZ(0, ep, needswap), dirblksiz);
		}
		pos += ep->d_reclen;
	}

	if (bp != NULL)
		brelse(bp, 0);
	DIRHASHLIST_LOCK();
	TAILQ_INSERT_TAIL(&ufsdirhash_list, dh, dh_list);
	dh->dh_onlist = 1;
	DIRHASH_UNLOCK(dh);
	DIRHASHLIST_UNLOCK();
	return (0);

fail:
	DIRHASH_UNLOCK(dh);
	if (dh->dh_hash != NULL) {
		for (i = 0; i < narrays; i++)
			if (dh->dh_hash[i] != NULL)
				DIRHASH_BLKFREE(dh->dh_hash[i]);
		kmem_free(dh->dh_hash, dh->dh_hashsz);
	}
	if (dh->dh_blkfree != NULL)
		kmem_free(dh->dh_blkfree, dh->dh_blkfreesz);
	mutex_destroy(&dh->dh_lock);
	pool_cache_put(ufsdirhash_cache, dh);
	ip->i_dirhash = NULL;
	atomic_add_int(&ufs_dirhashmem, -memreqd);
	return (-1);
}
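/*
 * Note on the accounting pattern in this variant: rather than taking
 * the list lock up front, atomic_add_int_nv() optimistically charges
 * memreqd against the global budget and returns the new total; on
 * overshoot the charge is immediately backed out and the caller either
 * gives up or recycles older hashes before retrying, keeping the
 * common path lock-free.
 */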