Exemplo n.º 1
0
/**
 * __bfq_exit_single_io_context - deassociate @cic from any running task.
 * @bfqd: bfq_data on which @cic is valid.
 * @cic: the cic being exited.
 *
 * Whenever no more tasks are using @cic or @bfqd is deallocated we
 * need to invalidate its entry in the radix tree hash table and to
 * release the queues it refers to.
 *
 * Called under the queue lock.
 */
static void __bfq_exit_single_io_context(struct bfq_data *bfqd,
					 struct cfq_io_context *cic)
{
	struct io_context *ioc = cic->ioc;

	/* Unlink @cic from @bfqd's cic list (init'ed so it can be re-deleted). */
	list_del_init(&cic->queue_list);

	/*
	 * Make sure the dead mark is seen for dead queues: the barrier
	 * orders the list removal above before publishing the dead key,
	 * so RCU readers that observe the new key also see @cic off the
	 * list before @bfqd goes away.
	 */
	smp_wmb();
	rcu_assign_pointer(cic->key, bfqd_dead_key(bfqd));

	/*
	 * No write-side locking, as no task is using @ioc (they're all
	 * exited, or bfqd is being deallocated).
	 */
	if (ioc->ioc_data == cic)
		rcu_assign_pointer(ioc->ioc_data, NULL);

	/* Tear down and drop the async queue, if one was ever set up. */
	if (cic->cfqq[BLK_RW_ASYNC] != NULL) {
		bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}

	/* Likewise for the sync queue. */
	if (cic->cfqq[BLK_RW_SYNC] != NULL) {
		bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}
Exemplo n.º 2
0
/**
 * __bfq_exit_single_io_context - deassociate @cic from any running task.
 * @bfqd: bfq_data on which @cic is valid.
 * @cic: the cic being exited.
 *
 * Whenever no more tasks are using @cic or @bfqd is deallocated we
 * need to invalidate its entry in the radix tree hash table and to
 * release the queues it refers to.
 *
 * Called under the queue lock.
 */
static void __bfq_exit_single_io_context(struct bfq_data *bfqd,
        struct cfq_io_context *cic)
{
    struct io_context *ioc = cic->ioc;

    /* Unlink @cic from @bfqd's cic list (init'ed so it can be re-deleted). */
    list_del_init(&cic->queue_list);

    /*
     * Make sure the dead mark is seen for dead queues: the barrier
     * orders the list removal above before publishing the dead key,
     * so RCU readers that observe the new key also see @cic off the
     * list before @bfqd goes away.
     */
    smp_wmb();
    rcu_assign_pointer(cic->key, bfqd_dead_key(bfqd));

    /*
     * No write-side locking, as no task is using @ioc (they're all
     * exited, or bfqd is being deallocated).  Check the cached-cic
     * hint under RCU first; only take ioc->lock if we actually have
     * to clear it.
     *
     * NOTE(review): between rcu_read_unlock() and spin_lock() the
     * hint could in principle change, and it is not rechecked under
     * the lock — presumably harmless here because no task uses @ioc,
     * but worth confirming against the upstream variant.
     */
    rcu_read_lock();
    if (rcu_dereference(ioc->ioc_data) == cic) {
        rcu_read_unlock();
        spin_lock(&ioc->lock);
        rcu_assign_pointer(ioc->ioc_data, NULL);
        spin_unlock(&ioc->lock);
    } else
        rcu_read_unlock();

    /* Tear down and drop the async queue, if one was ever set up. */
    if (cic->cfqq[BLK_RW_ASYNC] != NULL) {
        bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_ASYNC]);
        cic->cfqq[BLK_RW_ASYNC] = NULL;
    }

    /*
     * The sync queue may be shared (early-queue-merge); guard its
     * teardown with eqm_lock so merges/unmerges can't race with it.
     */
    spin_lock(&bfqd->eqm_lock);
    if (cic->cfqq[BLK_RW_SYNC] != NULL) {
        /*
         * If the bic is using a shared queue, put the reference
         * taken on the io_context when the bic started using a
         * shared bfq_queue.
         */
        if (bfq_bfqq_coop(cic->cfqq[BLK_RW_SYNC]))
            put_io_context(ioc);
        bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_SYNC]);
        cic->cfqq[BLK_RW_SYNC] = NULL;
    }
    spin_unlock(&bfqd->eqm_lock);
}