Example #1
void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_sb_list_del(file);
		file_free(file);
	}
}
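All of the snippets on this page share the same drop-to-zero release pattern: atomic_long_dec_and_test() atomically decrements the counter and returns true only for the caller that brought it to zero, so exactly one path performs the teardown. Below is a minimal userspace sketch of the idea, using C11 stdatomic rather than the kernel's atomic_long_t; the names are illustrative, not taken from the kernel.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	atomic_long refcount;	/* userspace stand-in for atomic_long_t */
};

/* Counterpart of a put_*() helper: drop one reference, free on the last one. */
static void object_put(struct object *obj)
{
	/* atomic_fetch_sub() returns the old value, so seeing 1 here means
	 * this caller held the final reference and owns the teardown. */
	if (atomic_fetch_sub(&obj->refcount, 1) == 1) {
		printf("last reference dropped, freeing object\n");
		free(obj);
	}
}

int main(void)
{
	struct object *obj = malloc(sizeof(*obj));

	if (!obj)
		return 1;
	atomic_init(&obj->refcount, 2);	/* two holders of the object */

	object_put(obj);	/* 2 -> 1: object stays alive */
	object_put(obj);	/* 1 -> 0: object is freed here */
	return 0;
}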
Example #2
void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));
		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
		spin_unlock(&unix_gc_lock);
	}
}
Example #3
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	BUG_ON(atomic_long_read(&ioc->refcount) == 0);

	if (atomic_long_dec_and_test(&ioc->refcount)) {
		rcu_read_lock();
		cfq_dtor(ioc);
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
Example #4
/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 */
int put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return 1;

	BUG_ON(atomic_long_read(&ioc->refcount) == 0);

	if (atomic_long_dec_and_test(&ioc->refcount)) {
		rcu_read_lock();
		if (ioc->aic && ioc->aic->dtor)
			ioc->aic->dtor(ioc->aic);
		hlist_sched_dtor(ioc, &ioc->cic_list);
		hlist_sched_dtor(ioc, &ioc->bfq_cic_list);
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
	}
	return 0;
}
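The leading comment on this example documents the contract: the return value tells the caller whether it just released the last user of the io_context. A hedged userspace sketch of that 1/0 contract follows, again with C11 atomics and illustrative names rather than kernel code.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_long refcount;
};

/* Mirrors the documented contract: returns 1 when the final reference was
 * dropped (and the context torn down), 0 while other users remain. */
static int ctx_put(struct ctx *c)
{
	if (c == NULL)
		return 1;

	if (atomic_fetch_sub(&c->refcount, 1) == 1) {
		free(c);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	atomic_init(&c->refcount, 2);

	if (!ctx_put(c))
		puts("context still has users");
	if (ctx_put(c))
		puts("context fully released");
	return 0;
}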
Example #5
void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count))
		__fput(file);
}
Example #6
File: acct.c  Project: oscardagrach/linux
static void acct_put(struct bsd_acct_struct *p)
{
	if (atomic_long_dec_and_test(&p->count))
		kfree_rcu(p, rcu);
}
Example #7
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 * @locked_q: request_queue the caller is holding queue_lock of (hint)
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.  If the caller is holding queue_lock of a queue, it can indicate
 * that with @locked_q.  This is an optimization hint and the caller is
 * allowed to pass in %NULL even when it's holding a queue_lock.
 */
void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
{
	struct request_queue *last_q = locked_q;
	unsigned long flags;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	if (locked_q)
		lockdep_assert_held(locked_q->queue_lock);

	if (!atomic_long_dec_and_test(&ioc->refcount))
		return;

	/*
	 * Destroy @ioc.  This is a bit messy because icq's are chained
	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
	 * The inner ioc->lock should be held to walk our icq_list and then
	 * for each icq the outer matching queue_lock should be grabbed.
	 * ie. We need to do reverse-order double lock dancing.
	 *
	 * Another twist is that we are often called with one of the
	 * matching queue_locks held as indicated by @locked_q, which
	 * prevents performing double-lock dance for other queues.
	 *
	 * So, we do it in two stages.  The fast path uses the queue_lock
	 * the caller is holding and, if other queues need to be accessed,
	 * uses trylock to avoid introducing locking dependency.  This can
	 * handle most cases, especially if @ioc was performing IO on only
	 * single device.
	 *
	 * If trylock doesn't cut it, we defer to @ioc->release_work which
	 * can do all the double-locking dancing.
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags,
				 ioc_release_depth(locked_q));

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *this_q = icq->q;

		if (this_q != last_q) {
			if (last_q && last_q != locked_q)
				spin_unlock(last_q->queue_lock);
			last_q = NULL;

			if (!spin_trylock(this_q->queue_lock))
				break;
			last_q = this_q;
			continue;
		}
		ioc_exit_icq(icq);
	}

	if (last_q && last_q != locked_q)
		spin_unlock(last_q->queue_lock);

	spin_unlock_irqrestore(&ioc->lock, flags);

	/* if no icq is left, we're done; otherwise, kick release_work */
	if (hlist_empty(&ioc->icq_list))
		kmem_cache_free(iocontext_cachep, ioc);
	else
		schedule_work(&ioc->release_work);
}
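The long comment inside this version of put_io_context() describes a two-stage approach to the reverse-order locking problem: take whatever extra lock you can get with a trylock, and defer anything that would need the proper outer-then-inner double lock to release_work. A minimal pthread sketch of that trylock-or-defer idea is shown below; the names and the deferral stub are assumptions, not the kernel implementation.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of queue_lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of ioc->lock  */

static void cleanup_item(void)
{
	puts("item cleaned up on the fast path");
}

static void schedule_deferred_cleanup(void)
{
	/* Stand-in for schedule_work(): a worker would later take outer and
	 * then inner in the normal order and finish the cleanup there. */
	puts("cleanup deferred to a worker");
}

static void release_path(void)
{
	pthread_mutex_lock(&inner);	/* the release path arrives holding only inner */

	/* Normal lock order is outer -> inner, so blocking on outer here could
	 * deadlock; trylock keeps the fast path safe, and failure falls back
	 * to the deferred worker. */
	if (pthread_mutex_trylock(&outer) == 0) {
		cleanup_item();
		pthread_mutex_unlock(&outer);
	} else {
		schedule_deferred_cleanup();
	}

	pthread_mutex_unlock(&inner);
}

int main(void)
{
	release_path();
	return 0;
}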