Example No. 1
/*
 * Destroy the inode hash table.
 */
void
ufs_ihashuninit(void)
{

	hashdestroy(ihashtbl, M_UFSIHASH, ihash);
	mtx_destroy(&ufs_ihash_mtx);
}
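
For orientation, here is a minimal sketch of the hashinit(9)/hashdestroy(9) lifecycle that these examples follow. The foo names and the M_FOOHASH malloc type are hypothetical; the hashinit() and hashdestroy() prototypes are the real ones from FreeBSD's <sys/systm.h>. hashdestroy() frees only the bucket array, so the caller must empty the chains first (a kernel built with INVARIANTS asserts that every bucket is empty).

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

static MALLOC_DEFINE(M_FOOHASH, "foohash", "hypothetical example hash table");

struct foo {
	LIST_ENTRY(foo)	f_hash;		/* linkage within a bucket chain */
};

static LIST_HEAD(foohashhead, foo) *foohashtbl;
static u_long foohashmask;		/* mask returned by hashinit() */

static void
foo_hash_init(void)
{
	/* hashinit() rounds the element count up to a power of two. */
	foohashtbl = hashinit(64, M_FOOHASH, &foohashmask);
}

static void
foo_hash_uninit(void)
{
	/*
	 * Pass the same malloc type and the mask hashinit() returned.
	 * The chains must already be empty; only the buckets are freed.
	 */
	hashdestroy(foohashtbl, M_FOOHASH, foohashmask);
	foohashtbl = NULL;
}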
Example No. 2
/*
 * Destroy inode hash table.
 */
void
ntfs_nthashdestroy(void)
{
	hashdestroy(ntfs_nthashtbl, M_NTFSNTHASH, ntfs_nthash);
	lockdestroy(&ntfs_hashlock);
	mtx_destroy(&ntfs_nthash_mtx);
}
Example No. 3
void drm_ht_remove(struct drm_open_hash *ht)
{
	if (ht->table) {
		hashdestroy(ht->table, DRM_MEM_HASHTAB, ht->mask);
		ht->table = NULL;
	}
}
Example No. 4
int
fdesc_uninit(struct vfsconf *vfsp)
{
	if (fdhashtbl)
		hashdestroy(fdhashtbl, M_CACHE, fdhash);
	return (0);
}
Example No. 5
/*
 * Stop per-filesystem syncer process
 */
void
vn_syncer_thr_stop(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL)
		return;

	lwkt_gettoken(&ctx->sc_token);

	/* Signal the syncer process to exit */
	ctx->sc_flags |= SC_FLAG_EXIT;
	wakeup(ctx);
	
	/* Wait till syncer process exits */
	while ((ctx->sc_flags & SC_FLAG_DONE) == 0) 
		tsleep(&ctx->sc_flags, 0, "syncexit", hz);

	mp->mnt_syncer_ctx = NULL;
	lwkt_reltoken(&ctx->sc_token);

	hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
	kfree(ctx, M_TEMP);
}
Example No. 6
void
ip6_destroy(void)
{

	hashdestroy(V_in6_ifaddrhashtbl, M_IFADDR, V_in6_ifaddrhmask);
	nd6_destroy();
	callout_drain(&V_in6_tmpaddrtimer_ch);
}
Example No. 7
/*
 * Uninit ready for unload.
 */
int
fdesc_uninit(struct vfsconf *vfsp)
{

	hashdestroy(fdhashtbl, M_CACHE, fdhash);
	mtx_destroy(&fdesc_hashmtx);
	return (0);
}
Example No. 8
void
ip6_destroy(void)
{
    int i;

    if ((i = pfil_head_unregister(&V_inet6_pfil_hook)) != 0)
        printf("%s: WARNING: unable to unregister pfil hook, "
               "error %d\n", __func__, i);
    hashdestroy(V_in6_ifaddrhashtbl, M_IFADDR, V_in6_ifaddrhmask);
    nd6_destroy();
    callout_drain(&V_in6_tmpaddrtimer_ch);
}
Example No. 9
/*
 * Shut down the quota system.
 */
void
dquninit(void)
{
    struct dquot *dq;

    hashdestroy(dqhashtbl, M_DQUOT, dqhash);
    while ((dq = TAILQ_FIRST(&dqfreelist)) != NULL) {
        TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
        mtx_destroy(&dq->dq_lock);
        free(dq, M_DQUOT);
    }
    mtx_destroy(&dqhlock);
}
Example No. 10
void
in_pcbgroup_destroy(struct inpcbinfo *pcbinfo)
{
	struct inpcbgroup *pcbgroup;
	u_int pgn;

	if (pcbinfo->ipi_npcbgroups == 0)
		return;

	for (pgn = 0; pgn < pcbinfo->ipi_npcbgroups; pgn++) {
		pcbgroup = &pcbinfo->ipi_pcbgroups[pgn];
		KASSERT(LIST_EMPTY(pcbinfo->ipi_listhead),
		    ("in_pcbgroup_destroy: listhead not empty"));
		INP_GROUP_LOCK_DESTROY(pcbgroup);
		hashdestroy(pcbgroup->ipg_hashbase, M_PCB,
		    pcbgroup->ipg_hashmask);
	}
	hashdestroy(pcbinfo->ipi_wildbase, M_PCB, pcbinfo->ipi_wildmask);
	free(pcbinfo->ipi_pcbgroups, M_PCB);
	pcbinfo->ipi_pcbgroups = NULL;
	pcbinfo->ipi_npcbgroups = 0;
	pcbinfo->ipi_hashfields = 0;
}
Example No. 11
static void
ksem_module_destroy(void)
{

#ifdef COMPAT_FREEBSD32
	syscall32_helper_unregister(ksem32_syscalls);
#endif
	syscall_helper_unregister(ksem_syscalls);

	hashdestroy(ksem_dictionary, M_KSEM, ksem_hash);
	sx_destroy(&ksem_dict_lock);
	mtx_destroy(&ksem_count_lock);
	mtx_destroy(&sem_lock);
	p31b_unsetcfg(CTL_P1003_1B_SEM_VALUE_MAX);
	p31b_unsetcfg(CTL_P1003_1B_SEM_NSEMS_MAX);
}
Example No. 12
void
drm_gem_names_fini(struct drm_gem_names *names)
{
	struct drm_gem_name *np;
	int i;

	mtx_lock(&names->lock);
	for (i = 0; i <= names->hash_mask; i++) {
		while ((np = LIST_FIRST(&names->names_hash[i])) != NULL) {
			drm_gem_names_delete_name(names, np);
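			/*
			 * drm_gem_names_delete_name() drops names->lock
			 * (the mtx_lock() below would deadlock otherwise),
			 * so the loop retakes it before the next LIST_FIRST().
			 */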
			mtx_lock(&names->lock);
		}
	}
	mtx_unlock(&names->lock);
	mtx_destroy(&names->lock);
	hashdestroy(names->names_hash, M_GEM_NAMES, names->hash_mask);
	delete_unrhdr(names->unr);
}
Example No. 13
/* Unmount the filesystem described by mp. */
static int
smbfs_unmount(struct mount *mp, int mntflags)
{
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smb_cred scred;
	int error, flags;

	SMBVDEBUG("smbfs_unmount: flags=%04x\n", mntflags);
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	/*
	 * Keep trying to flush the vnode list for the mount while 
	 * some are still busy and we are making progress towards
	 * making them not busy. This is needed because smbfs vnodes
	 * reference their parent directory but may appear after their
	 * parent in the list; one pass over the vnode list is not
	 * sufficient in this case.
	 */
	do {
		smp->sm_didrele = 0;
		/* There is 1 extra root vnode reference from smbfs_mount(). */
		error = vflush(mp, 1, flags);
	} while (error == EBUSY && smp->sm_didrele != 0);
	if (error)
		return error;
	smb_makescred(&scred, curthread, smp->sm_cred);
	error = smb_share_lock(smp->sm_share, LK_EXCLUSIVE);
	if (error)
		goto out;
	smb_share_put(smp->sm_share, &scred);
	mp->mnt_data = (qaddr_t)0;

	if (smp->sm_cred)
		crfree(smp->sm_cred);
	if (smp->sm_hash)
		hashdestroy(smp->sm_hash, M_SMBFSHASH, smp->sm_hashlen);
	lockdestroy(&smp->sm_hashlock);
	kfree(smp, M_SMBFSDATA);
	mp->mnt_flag &= ~MNT_LOCAL;
out:
	return error;
}
Example No. 14
static void
free_tom_data(struct tom_data *td)
{
	KASSERT(TAILQ_EMPTY(&td->toep_list),
	    ("%s: toep_list not empty", __func__));

	if (td->listen_mask != 0)
		hashdestroy(td->listen_hash, M_CXGB, td->listen_mask);

	if (mtx_initialized(&td->toep_list_lock))
		mtx_destroy(&td->toep_list_lock);
	if (mtx_initialized(&td->lctx_hash_lock))
		mtx_destroy(&td->lctx_hash_lock);
	if (mtx_initialized(&td->tid_release_lock))
		mtx_destroy(&td->tid_release_lock);
	if (td->l2t)
		t3_free_l2t(td->l2t);
	free_tid_tabs(&td->tid_maps);
	free(td, M_CXGB);
}
Example No. 15
void
fha_uninit(struct fha_params *softc)
{
    sysctl_ctx_free(&softc->sysctl_ctx);
    hashdestroy(softc->g_fha.hashtable, M_NFS_FHA, softc->g_fha.hashmask);
}
Example No. 16
static int
smbfs_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct smbfs_args args; 	  /* will hold data from mount request */
	struct smbmount *smp = NULL;
	struct smb_vc *vcp;
	struct smb_share *ssp = NULL;
	struct vnode *vp;
	struct smb_cred scred;
	int error;
	int hsize;
	char *pc, *pe;

	if (data == NULL) {
		kprintf("missing data argument\n");
		return EINVAL;
	}
	if (mp->mnt_flag & MNT_UPDATE) {
		kprintf("MNT_UPDATE not implemented");
		return EOPNOTSUPP;
	}
	error = copyin(data, (caddr_t)&args, sizeof(struct smbfs_args));
	if (error)
		return error;
	if (args.version != SMBFS_VERSION) {
		kprintf("mount version mismatch: kernel=%d, mount=%d\n",
		    SMBFS_VERSION, args.version);
		return EINVAL;
	}
	smb_makescred(&scred, curthread, cred);
	error = smb_dev2share(args.dev, SMBM_EXEC, &scred, &ssp);
	if (error) {
		kprintf("invalid device handle %d (%d)\n", args.dev, error);
		return error;
	}
	vcp = SSTOVC(ssp);
	smb_share_unlock(ssp, 0);
	mp->mnt_stat.f_iosize = SSTOVC(ssp)->vc_txmax;

	smp = kmalloc(sizeof(*smp), M_SMBFSDATA, M_WAITOK | M_USE_RESERVE | M_ZERO);
	mp->mnt_data = (qaddr_t)smp;
	smp->sm_cred = crhold(cred);

	hsize = vfs_inodehashsize();
	smp->sm_hash = hashinit(hsize, M_SMBFSHASH, &smp->sm_hashlen);
	if (smp->sm_hash == NULL) {
		error = ENOMEM;
		goto bad;
	}
	lockinit(&smp->sm_hashlock, "smbfsh", 0, 0);
	smp->sm_share = ssp;
	smp->sm_root = NULL;
	smp->sm_args = args;
	smp->sm_caseopt = args.caseopt;
	smp->sm_args.file_mode = (smp->sm_args.file_mode &
			    (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFREG;
	smp->sm_args.dir_mode  = (smp->sm_args.dir_mode &
			    (S_IRWXU|S_IRWXG|S_IRWXO)) | S_IFDIR;

/*	simple_lock_init(&smp->sm_npslock);*/
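	/* Build "//user@server/share" in f_mntfromname, as reported by mount(8). */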
	pc = mp->mnt_stat.f_mntfromname;
	pe = pc + sizeof(mp->mnt_stat.f_mntfromname);
	bzero(pc, MNAMELEN);
	*pc++ = '/';
	*pc++ = '/';
	pc = index(strncpy(pc, vcp->vc_username, pe - pc - 2), 0);
	if (pc < pe-1) {
		*(pc++) = '@';
		pc = index(strncpy(pc, vcp->vc_srvname, pe - pc - 2), 0);
		if (pc < pe - 1) {
			*(pc++) = '/';
			strncpy(pc, ssp->ss_name, pe - pc - 2);
		}
	}
	/* protect against invalid mount points */
	smp->sm_args.mount_point[sizeof(smp->sm_args.mount_point) - 1] = '\0';
	vfs_getnewfsid(mp);

	vfs_add_vnodeops(mp, &smbfs_vnode_vops, &mp->mnt_vn_norm_ops);

	error = smbfs_root(mp, &vp);
	if (error)
		goto bad;
	vn_unlock(vp);
	SMBVDEBUG("root.v_refcnt = %08x\n", vp->v_refcnt);

#ifdef DIAGNOSTICS
	SMBERROR("mp=%p\n", mp);
#endif
	return error;
bad:
	if (smp) {
		if (smp->sm_cred)
			crfree(smp->sm_cred);
		if (smp->sm_hash)
			hashdestroy(smp->sm_hash, M_SMBFSHASH,
			    smp->sm_hashlen);
		lockdestroy(&smp->sm_hashlock);
		kfree(smp, M_SMBFSDATA);
	}
	if (ssp)
		smb_share_put(ssp, &scred);
	return error;
}
Example No. 17
/*
 * Clean up the unionfs node.
 */
void
unionfs_noderem(struct vnode *vp, struct thread *td)
{
	int		count;
	struct unionfs_node *unp, *unp_t1, *unp_t2;
	struct unionfs_node_hashhead *hd;
	struct unionfs_node_status *unsp, *unsp_tmp;
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vnode   *dvp;

	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in unionfs_lock().
	 */
	VI_LOCK(vp);
	unp = VTOUNIONFS(vp);
	lvp = unp->un_lowervp;
	uvp = unp->un_uppervp;
	dvp = unp->un_dvp;
	unp->un_lowervp = unp->un_uppervp = NULLVP;
	vp->v_vnlock = &(vp->v_lock);
	vp->v_data = NULL;
	vp->v_object = NULL;
	VI_UNLOCK(vp);

	if (lvp != NULLVP)
		VOP_UNLOCK(lvp, LK_RELEASE);
	if (uvp != NULLVP)
		VOP_UNLOCK(uvp, LK_RELEASE);

	if (dvp != NULLVP && unp->un_hash.le_prev != NULL)
		unionfs_rem_cached_vnode(unp, dvp);

	if (lockmgr(vp->v_vnlock, LK_EXCLUSIVE, VI_MTX(vp)) != 0)
		panic("the lock for deletion is unacquirable.");

	if (lvp != NULLVP)
		vrele(lvp);
	if (uvp != NULLVP)
		vrele(uvp);
	if (dvp != NULLVP) {
		vrele(dvp);
		unp->un_dvp = NULLVP;
	}
	if (unp->un_path != NULL) {
		free(unp->un_path, M_UNIONFSPATH);
		unp->un_path = NULL;
	}

	if (unp->un_hashtbl != NULL) {
		for (count = 0; count <= unp->un_hashmask; count++) {
			hd = unp->un_hashtbl + count;
			LIST_FOREACH_SAFE(unp_t1, hd, un_hash, unp_t2) {
				LIST_REMOVE(unp_t1, un_hash);
				unp_t1->un_hash.le_next = NULL;
				unp_t1->un_hash.le_prev = NULL;
			}
		}
		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);
	}

	LIST_FOREACH_SAFE(unsp, &(unp->un_unshead), uns_list, unsp_tmp) {
		LIST_REMOVE(unsp, uns_list);
		free(unsp, M_TEMP);
	}

	free(unp, M_UNIONFSNODE);
}
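
Note the teardown order here: every entry is unlinked from its chain with LIST_REMOVE() before hashdestroy() runs, because hashdestroy() frees only the bucket array and, under INVARIANTS, asserts that each bucket is already empty.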
Example No. 18
/*
 * Clean up the unionfs node.
 */
void
unionfs_noderem(struct vnode *vp, struct thread *td)
{
	int		vfslocked;
	int		count;
	struct unionfs_node *unp, *unp_t1, *unp_t2;
	struct unionfs_node_hashhead *hd;
	struct unionfs_node_status *unsp, *unsp_tmp;
	struct vnode   *lvp;
	struct vnode   *uvp;
	struct vnode   *dvp;

	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in unionfs_lock().
	 */
	VI_LOCK(vp);
	unp = VTOUNIONFS(vp);
	lvp = unp->un_lowervp;
	uvp = unp->un_uppervp;
	dvp = unp->un_dvp;
	unp->un_lowervp = unp->un_uppervp = NULLVP;

	vp->v_vnlock = &(vp->v_lock);
	vp->v_data = NULL;
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_INTERLOCK, VI_MTX(vp), td);
	if (lvp != NULLVP)
		VOP_UNLOCK(lvp, 0, td);
	if (uvp != NULLVP)
		VOP_UNLOCK(uvp, 0, td);
	vp->v_object = NULL;

	if (dvp != NULLVP && unp->un_hash.le_prev != NULL)
		unionfs_rem_cached_vnode(unp, dvp);

	if (lvp != NULLVP) {
		vfslocked = VFS_LOCK_GIANT(lvp->v_mount);
		vrele(lvp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (uvp != NULLVP) {
		vfslocked = VFS_LOCK_GIANT(uvp->v_mount);
		vrele(uvp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (dvp != NULLVP) {
		vfslocked = VFS_LOCK_GIANT(dvp->v_mount);
		vrele(dvp);
		VFS_UNLOCK_GIANT(vfslocked);
		unp->un_dvp = NULLVP;
	}
	if (unp->un_path != NULL) {
		free(unp->un_path, M_UNIONFSPATH);
		unp->un_path = NULL;
	}

	if (unp->un_hashtbl != NULL) {
		for (count = 0; count <= unp->un_hashmask; count++) {
			hd = unp->un_hashtbl + count;
			LIST_FOREACH_SAFE(unp_t1, hd, un_hash, unp_t2) {
				LIST_REMOVE(unp_t1, un_hash);
				unp_t1->un_hash.le_next = NULL;
				unp_t1->un_hash.le_prev = NULL;
			}
		}
		hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);
	}

	LIST_FOREACH_SAFE(unsp, &(unp->un_unshead), uns_list, unsp_tmp) {
		LIST_REMOVE(unsp, uns_list);
		free(unsp, M_TEMP);
	}

	free(unp, M_UNIONFSNODE);
}