/*
 * Atomically add 'dv' to the counter and return the value the counter
 * held *before* the addition (fetch-and-add semantics).  Built on the
 * native add-and-fetch primitive atomic_add_int_nv(), which returns the
 * new value, so the old value is recovered by subtracting 'dv' again.
 */
static int
atomic_exchange_and_add(sp_counted_base_atomic_type volatile *pw, int dv)
{
	unsigned int nv;

	/* add-and-fetch: nv is the counter value after the addition */
	nv = atomic_add_int_nv(&pw->ui, dv);
	return nv - dv;
}
/*
 * Attempt to build up a hash table for the directory contents in
 * inode 'ip'. Returns 0 on success, or -1 if the operation failed.
 *
 * The hash maps directory offsets to name-hash slots; the dh_blkfree
 * array tracks free space per directory block for fast entry placement.
 * All allocations are non-blocking (PR_NOWAIT/KM_NOSLEEP) so that on
 * memory pressure we simply fall back to the linear directory scan.
 */
int
ufsdirhash_build(struct inode *ip)
{
	struct dirhash *dh;
	struct buf *bp = NULL;	/* current directory buffer, released on all exits */
	struct direct *ep;
	struct vnode *vp;
	doff_t bmask, pos;
	int dirblocks, i, j, memreqd, nblocks, narrays, nslots, slot;
	const int needswap = UFS_MPNEEDSWAP(ip->i_ump);
	int dirblksiz = ip->i_ump->um_dirblksiz;

	/* Check if we can/should use dirhash. */
	if (ip->i_dirhash == NULL) {
		/* Too small to be worth hashing, or old (level-1) format. */
		if (ip->i_size < (ufs_dirhashminblks * dirblksiz) || OFSFMT(ip))
			return (-1);
	} else {
		/* Hash exists, but sysctls could have changed. */
		if (ip->i_size < (ufs_dirhashminblks * dirblksiz) ||
		    ufs_dirhashmem > ufs_dirhashmaxmem) {
			ufsdirhash_free(ip);
			return (-1);
		}
		/* Check if hash exists and is intact (note: unlocked read). */
		if (ip->i_dirhash->dh_hash != NULL)
			return (0);
		/* Free the old, recycled hash and build a new one. */
		ufsdirhash_free(ip);
	}

	/* Don't hash removed directories. */
	if (ip->i_nlink == 0)
		return (-1);

	vp = ip->i_vnode;
	/* Allocate 50% more entries than this dir size could ever need. */
	KASSERT(ip->i_size >= dirblksiz);
	nslots = ip->i_size / UFS_DIRECTSIZ(1);
	nslots = (nslots * 3 + 1) / 2;
	/* Round slot count up to whole hash-array chunks of DH_NBLKOFF. */
	narrays = howmany(nslots, DH_NBLKOFF);
	nslots = narrays * DH_NBLKOFF;
	dirblocks = howmany(ip->i_size, dirblksiz);
	/* 50% headroom on the per-block free-space array as well. */
	nblocks = (dirblocks * 3 + 1) / 2;
	memreqd = sizeof(*dh) + narrays * sizeof(*dh->dh_hash) +
	    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
	    nblocks * sizeof(*dh->dh_blkfree);

	/*
	 * Reserve the memory in the global dirhash accounting up front;
	 * if that pushes us over the system-wide limit, back the
	 * reservation out and either give up or try to recycle an older
	 * hash to make room.
	 */
	while (atomic_add_int_nv(&ufs_dirhashmem, memreqd) >
	    ufs_dirhashmaxmem) {
		atomic_add_int(&ufs_dirhashmem, -memreqd);
		/* A single huge directory is never worth half the budget. */
		if (memreqd > ufs_dirhashmaxmem / 2)
			return (-1);
		/* Try to free some space. */
		if (ufsdirhash_recycle(memreqd) != 0)
			return (-1);
		/*
		 * NOTE(review): the unlock here implies ufsdirhash_recycle()
		 * returns with the dirhash list lock held on success —
		 * confirm against its definition.
		 */
		else
			DIRHASHLIST_UNLOCK();
	}

	/*
	 * Use non-blocking mallocs so that we will revert to a linear
	 * lookup on failure rather than potentially blocking forever.
	 */
	dh = pool_cache_get(ufsdirhash_cache, PR_NOWAIT);
	if (dh == NULL) {
		atomic_add_int(&ufs_dirhashmem, -memreqd);
		return (-1);
	}
	memset(dh, 0, sizeof(*dh));
	mutex_init(&dh->dh_lock, MUTEX_DEFAULT, IPL_NONE);
	/* Held across the build so readers never see a half-built hash. */
	DIRHASH_LOCK(dh);
	dh->dh_hashsz = narrays * sizeof(dh->dh_hash[0]);
	dh->dh_hash = kmem_zalloc(dh->dh_hashsz, KM_NOSLEEP);
	dh->dh_blkfreesz = nblocks * sizeof(dh->dh_blkfree[0]);
	dh->dh_blkfree = kmem_zalloc(dh->dh_blkfreesz, KM_NOSLEEP);
	if (dh->dh_hash == NULL || dh->dh_blkfree == NULL)
		goto fail;
	for (i = 0; i < narrays; i++) {
		if ((dh->dh_hash[i] = DIRHASH_BLKALLOC()) == NULL)
			goto fail;
		/* Mark every slot in this chunk as empty. */
		for (j = 0; j < DH_NBLKOFF; j++)
			dh->dh_hash[i][j] = DIRHASH_EMPTY;
	}

	/* Initialise the hash table and block statistics. */
	dh->dh_narrays = narrays;
	dh->dh_hlen = nslots;
	dh->dh_nblk = nblocks;
	dh->dh_dirblks = dirblocks;
	/* Each block starts fully free (in DIRALIGN-sized units). */
	for (i = 0; i < dirblocks; i++)
		dh->dh_blkfree[i] = dirblksiz / DIRALIGN;
	for (i = 0; i < DH_NFSTATS; i++)
		dh->dh_firstfree[i] = -1;
	dh->dh_firstfree[DH_NFSTATS] = 0;
	dh->dh_seqopt = 0;
	dh->dh_seqoff = 0;
	dh->dh_score = DH_SCOREINIT;
	ip->i_dirhash = dh;

	/* Mask to detect crossing into a new filesystem I/O block. */
	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
	pos = 0;
	while (pos < ip->i_size) {
		/* Yield the CPU if the scheduler has asked us to. */
		if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			preempt();
		}
		/* If necessary, get the next directory block. */
		if ((pos & bmask) == 0) {
			if (bp != NULL)
				brelse(bp, 0);
			if (ufs_blkatoff(vp, (off_t)pos, NULL, &bp, false) != 0)
				goto fail;
		}
		/* Add this entry to the hash. */
		ep = (struct direct *)((char *)bp->b_data + (pos & bmask));
		/* reclen must be nonzero and fit in the directory block. */
		if (ep->d_reclen == 0 || ep->d_reclen >
		    dirblksiz - (pos & (dirblksiz - 1))) {
			/* Corrupted directory. */
			brelse(bp, 0);
			goto fail;
		}
		if (ep->d_ino != 0) {
			/* Add the entry (simplified ufsdirhash_add):
			 * linear-probe to the first empty slot. */
			slot = ufsdirhash_hash(dh, ep->d_name, ep->d_namlen);
			while (DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
				slot = WRAPINCR(slot, dh->dh_hlen);
			dh->dh_hused++;
			DH_ENTRY(dh, slot) = pos;
			/* Charge the entry's size against its block's free space. */
			ufsdirhash_adjfree(dh, pos,
			    -UFS_DIRSIZ(0, ep, needswap), dirblksiz);
		}
		pos += ep->d_reclen;
	}

	if (bp != NULL)
		brelse(bp, 0);
	/* Publish the completed hash on the global recycling list. */
	DIRHASHLIST_LOCK();
	TAILQ_INSERT_TAIL(&ufsdirhash_list, dh, dh_list);
	dh->dh_onlist = 1;
	DIRHASH_UNLOCK(dh);
	DIRHASHLIST_UNLOCK();
	return (0);

fail:
	/*
	 * Tear down the partially built hash: free whatever was
	 * allocated, detach it from the inode, and return the reserved
	 * memory to the global accounting.
	 */
	DIRHASH_UNLOCK(dh);
	if (dh->dh_hash != NULL) {
		for (i = 0; i < narrays; i++)
			if (dh->dh_hash[i] != NULL)
				DIRHASH_BLKFREE(dh->dh_hash[i]);
		kmem_free(dh->dh_hash, dh->dh_hashsz);
	}
	if (dh->dh_blkfree != NULL)
		kmem_free(dh->dh_blkfree, dh->dh_blkfreesz);
	mutex_destroy(&dh->dh_lock);
	pool_cache_put(ufsdirhash_cache, dh);
	ip->i_dirhash = NULL;
	atomic_add_int(&ufs_dirhashmem, -memreqd);
	return (-1);
}