Example #1
static void put_ldisc(struct tty_ldisc *ld)
{
	unsigned long flags;

	if (WARN_ON_ONCE(!ld))
		return;

	/*
	 * If this is the last user, free the ldisc, and
	 * release the ldisc ops.
	 *
	 * We really want an "atomic_dec_and_lock_irqsave()",
	 * but we don't have it, so this does it by hand.
	 */
	local_irq_save(flags);
	if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
		struct tty_ldisc_ops *ldo = ld->ops;

		ldo->refcount--;
		module_put(ldo->owner);
		spin_unlock_irqrestore(&tty_ldisc_lock, flags);

		kfree(ld);
		return;
	}
	local_irq_restore(flags);
}
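The helper that the comment above wishes for is easy to express with the same two primitives the function open-codes; later kernels did grow an atomic_dec_and_lock_irqsave() of essentially this shape. A minimal sketch, not quoted from any particular kernel version:

static inline int atomic_dec_and_lock_irqsave(atomic_t *cnt, spinlock_t *lock,
					      unsigned long *flags)
{
	/* Interrupts off first, exactly as put_ldisc() does by hand. */
	local_irq_save(*flags);
	if (atomic_dec_and_lock(cnt, lock))
		return 1;	/* hit zero: lock held, irq state saved in *flags */
	local_irq_restore(*flags);
	return 0;		/* still referenced: nothing held */
}

With it, the body of put_ldisc() reduces to a single atomic_dec_and_lock_irqsave() call paired with spin_unlock_irqrestore().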
Example #2
void auth_domain_put(struct auth_domain *dom)
{
	if (atomic_dec_and_lock(&dom->ref.refcount, &auth_domain_lock)) {
		hlist_del(&dom->hash);
		dom->flavour->domain_release(dom);
		spin_unlock(&auth_domain_lock);
	}
}
Example #3
/*
 * put_ipc_ns - drop a reference to an ipc namespace.
 * @ns: the namespace to put
 *
 * If this is the last task in the namespace exiting, and
 * it is dropping the refcount to 0, then it can race with
 * a task in another ipc namespace but in a mounts namespace
 * which has this ipcns's mqueuefs mounted, doing some action
 * with one of the mqueuefs files.  That can raise the refcount.
 * So dropping the refcount, and raising the refcount when
 * accessing it through the VFS, are protected with mq_lock.
 *
 * (Clearly, a task raising the refcount on its own ipc_ns
 * needn't take mq_lock since it can't race with the last task
 * in the ipcns exiting).
 */
void put_ipc_ns(struct ipc_namespace *ns)
{
	if (atomic_dec_and_lock(&ns->count, &mq_lock)) {
		mq_clear_sbinfo(ns);
		spin_unlock(&mq_lock);
		mq_put_mnt(ns);
		free_ipc_ns(ns);
	}
}
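The comment describes the raise side of this protocol, but the excerpt does not show it. A hedged sketch of what a VFS-side get could look like; the helper name and the exact check are illustrative, not the actual mqueue code:

static struct ipc_namespace *get_ipc_ns_from_vfs(struct ipc_namespace *ns)
{
	/* The raise must happen under mq_lock so it cannot interleave
	 * with the dec-to-zero in put_ipc_ns(), and it must fail once
	 * the count has already reached zero (teardown has begun). */
	spin_lock(&mq_lock);
	if (ns && !atomic_inc_not_zero(&ns->count))
		ns = NULL;	/* lost the race with the last put */
	spin_unlock(&mq_lock);
	return ns;
}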
Example #4
void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}
Example #5
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
		list_del(&mm->mmlist);
		mmlist_nr--;
		spin_unlock(&mmlist_lock);
		exit_aio(mm);
		exit_mmap(mm);
		mmdrop(mm);
	}
}
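The trailing mmdrop() reflects a two-counter scheme: mm_users, dropped here, counts users of the address space, while mm_count keeps the struct mm_struct itself alive (lazy-TLB kernel threads hold it, for instance). A sketch of that second-level put, matching its long-standing kernel shape:

static inline void mmdrop(struct mm_struct *mm)
{
	/* Last mm_count reference frees the pgd and the mm_struct. */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}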
Example #6
/* Wake netif's TX queue
 * We want to be able to nest calls to netif_stop_queue(), since each
 * channel can have an individual stop on the queue.
 */
void efx_wake_queue(struct efx_nic *efx)
{
	local_bh_disable();
	if (atomic_dec_and_lock(&efx->netif_stop_count,
				&efx->netif_stop_lock)) {
		EFX_TRACE(efx, "waking TX queue\n");
		netif_wake_queue(efx->net_dev);
		spin_unlock(&efx->netif_stop_lock);
	}
	local_bh_enable();
}
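The nesting described in the comment only works because the stop side raises the same counter under the same lock. A sketch of that counterpart, reconstructed from the pattern rather than quoted from this excerpt:

void efx_stop_queue(struct efx_nic *efx)
{
	spin_lock_bh(&efx->netif_stop_lock);
	EFX_TRACE(efx, "stopping TX queue\n");

	/* Each stop nests; only the wake that returns the count to
	 * zero (see efx_wake_queue() above) restarts the queue. */
	atomic_inc(&efx->netif_stop_count);
	netif_stop_queue(efx->net_dev);

	spin_unlock_bh(&efx->netif_stop_lock);
}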
Example #7
void ieee80211_rx_bss_put(struct ieee80211_local *local,
			  struct ieee80211_bss *bss)
{
	/* atomic_dec_and_lock() takes a plain spin_lock(), so BHs are
	 * disabled up front; the _bh unlock below re-enables them. */
	local_bh_disable();
	if (!atomic_dec_and_lock(&bss->users, &local->bss_lock)) {
		local_bh_enable();
		return;
	}

	__ieee80211_rx_bss_hash_del(local, bss);
	list_del(&bss->list);
	spin_unlock_bh(&local->bss_lock);
	ieee80211_rx_bss_free(bss);
}
Example #8
/*
 * release a bundle
 */
void rxrpc_put_bundle(struct rxrpc_transport *trans,
		      struct rxrpc_conn_bundle *bundle)
{
	_enter("%p,%p{%d}", trans, bundle, atomic_read(&bundle->usage));

	if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
		_debug("Destroy bundle");
		rb_erase(&bundle->node, &trans->bundles);
		spin_unlock(&trans->client_lock);
		ASSERT(list_empty(&bundle->unused_conns));
		ASSERT(list_empty(&bundle->avail_conns));
		ASSERT(list_empty(&bundle->busy_conns));
		ASSERTCMP(bundle->num_conns, ==, 0);
		key_put(bundle->key);
		kfree(bundle);
	}
}
Example #9
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	/* dput on a free dentry? */
	if (!list_empty(&dentry->d_lru))
		BUG();
	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (list_empty(&dentry->d_hash))
		goto kill_it;
	list_add(&dentry->d_lru, &dentry_unused);
	dentry_stat.nr_unused++;
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	list_del_init(&dentry->d_hash);

kill_it: {
		struct dentry *parent;
		list_del(&dentry->d_child);
		/* drops the lock, at that point nobody can reach this dentry */
		dentry_iput(dentry);
		parent = dentry->d_parent;
		d_free(dentry);
		if (dentry == parent)
			return;
		dentry = parent;
		goto repeat;
	}
}
Example #10
void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
Example #11
/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
		local_irq_restore(flags);
		return;
	}

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->bdi) {
		rb_erase(&congested->rb_node,
			 &congested->bdi->cgwb_congested_tree);
		congested->bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}
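This is the same hand-rolled save/dec-and-lock/restore sequence as in Example #1. Assuming the atomic_dec_and_lock_irqsave() helper sketched there, the function would plausibly collapse to:

void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags))
		return;

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->bdi) {
		rb_erase(&congested->rb_node,
			 &congested->bdi->cgwb_congested_tree);
		congested->bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}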
Example #12
void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
Example #13
void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		/* Removing the segment drops its reference on the layout
		 * header, so pin lo first; it must outlive pnfs_free_lseg(). */
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}