Example #1
0
/**
 * __bfq_exit_single_io_context - deassociate @cic from any running task.
 * @bfqd: bfq_data on which @cic is valid.
 * @cic: the cic being exited.
 *
 * Whenever no more tasks are using @cic or @bfqd is deallocated we
 * need to invalidate its entry in the radix tree hash table and to
 * release the queues it refers to.
 *
 * Called under the queue lock.
 */
static void __bfq_exit_single_io_context(struct bfq_data *bfqd,
					 struct cfq_io_context *cic)
{
	struct io_context *ioc = cic->ioc;

	list_del_init(&cic->queue_list);

	/*
	 * Make sure dead mark is seen for dead queues
	 */
	smp_wmb();
	rcu_assign_pointer(cic->key, bfqd_dead_key(bfqd));

	/*
	 * No write-side locking as no task is using @ioc (all tasks have
	 * exited or bfqd is being deallocated).
	 */
	if (ioc->ioc_data == cic)
		rcu_assign_pointer(ioc->ioc_data, NULL);

	if (cic->cfqq[BLK_RW_ASYNC] != NULL) {
		bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}

	if (cic->cfqq[BLK_RW_SYNC] != NULL) {
		bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}
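
The "Called under the queue lock" note implies a teardown loop on the caller's side. Below is a minimal sketch of such a call site, assuming the cics of a device are linked through cic->queue_list on a bfqd->cic_list head (as the list_del_init() above suggests) and that q->queue_lock is the queue lock in question; it is an illustration, not the actual bfq teardown code.

/*
 * Hedged sketch of a teardown call site: assumes struct bfq_data keeps
 * its cics on a cic_list head, linked through cic->queue_list.
 */
static void bfq_exit_all_cics_sketch(struct request_queue *q,
				     struct bfq_data *bfqd)
{
	spin_lock_irq(q->queue_lock);

	while (!list_empty(&bfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(bfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		/* Unhashes the cic, marks its key dead, exits its queues. */
		__bfq_exit_single_io_context(bfqd, cic);
	}

	spin_unlock_irq(q->queue_lock);
}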
Example #2
0
/**
 * bfq_drop_dead_cic - free an exited cic.
 * @bfqd: bfq data for the device in use.
 * @ioc: io_context owning @cic.
 * @cic: the @cic to free.
 *
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void bfq_drop_dead_cic(struct bfq_data *bfqd, struct io_context *ioc,
			      struct cfq_io_context *cic)
{
	unsigned long flags;

	WARN_ON(!list_empty(&cic->queue_list));
	BUG_ON(cic->key != bfqd_dead_key(bfqd));

	spin_lock_irqsave(&ioc->lock, flags);

	BUG_ON(ioc->ioc_data == cic);

	/*
	 * With shared I/O contexts two lookups may race and try to drop
	 * the same cic more than once: RCU guarantees that the storage
	 * will not be freed too early, and the check below makes sure
	 * that we do not remove the cic from the hashing structures
	 * multiple times.
	 */
	if (!hlist_unhashed(&cic->cic_list)) {
		radix_tree_delete(&ioc->bfq_radix_root, bfqd->cic_index);
		hlist_del_init_rcu(&cic->cic_list);
		bfq_cic_free(cic);
	}

	spin_unlock_irqrestore(&ioc->lock, flags);
}
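
bfq_drop_dead_cic() is only reached from a lookup that finds a cic whose key was switched to bfqd_dead_key() by __bfq_exit_single_io_context(). A simplified, hedged sketch of that lookup side is shown below (no last-hit ioc_data cache, no retry loop); it is not the full bfq lookup code.

/*
 * Hedged sketch of the lookup path that relies on the lazy drop above:
 * a cic found in the radix tree with a dead key is unhashed and freed,
 * and the lookup reports a miss.
 */
static struct cfq_io_context *bfq_cic_lookup_sketch(struct bfq_data *bfqd,
						    struct io_context *ioc)
{
	struct cfq_io_context *cic;
	bool dead;

	rcu_read_lock();
	cic = radix_tree_lookup(&ioc->bfq_radix_root, bfqd->cic_index);
	dead = cic != NULL && rcu_dereference(cic->key) == bfqd_dead_key(bfqd);
	rcu_read_unlock();

	if (dead) {
		/* Exited lazily on the other side; unhash, free, miss. */
		bfq_drop_dead_cic(bfqd, ioc, cic);
		cic = NULL;
	}

	return cic;
}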
Example #3
0
/**
 * __bfq_exit_single_io_context - deassociate @cic from any running task.
 * @bfqd: bfq_data on which @cic is valid.
 * @cic: the cic being exited.
 *
 * Whenever no more tasks are using @cic or @bfqd is deallocated we
 * need to invalidate its entry in the radix tree hash table and to
 * release the queues it refers to.
 *
 * Called under the queue lock.
 */
static void __bfq_exit_single_io_context(struct bfq_data *bfqd,
        struct cfq_io_context *cic)
{
    struct io_context *ioc = cic->ioc;

    list_del_init(&cic->queue_list);

    /*
     * Make sure dead mark is seen for dead queues
     */
    smp_wmb();
    rcu_assign_pointer(cic->key, bfqd_dead_key(bfqd));

    /*
     * No write-side locking as no task is using @ioc (all tasks have
     * exited or bfqd is being deallocated).
     */
    rcu_read_lock();
    if (rcu_dereference(ioc->ioc_data) == cic) {
        rcu_read_unlock();
        spin_lock(&ioc->lock);
        rcu_assign_pointer(ioc->ioc_data, NULL);
        spin_unlock(&ioc->lock);
    } else
        rcu_read_unlock();

    if (cic->cfqq[BLK_RW_ASYNC] != NULL) {
        bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_ASYNC]);
        cic->cfqq[BLK_RW_ASYNC] = NULL;
    }

    spin_lock(&bfqd->eqm_lock);
    if (cic->cfqq[BLK_RW_SYNC] != NULL) {
        /*
         * If the bic is using a shared queue, put the reference
         * taken on the io_context when the bic started using a
         * shared bfq_queue.
         */
        if (bfq_bfqq_coop(cic->cfqq[BLK_RW_SYNC]))
            put_io_context(ioc);
        bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_SYNC]);
        cic->cfqq[BLK_RW_SYNC] = NULL;
    }
    spin_unlock(&bfqd->eqm_lock);
}
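
This variant differs from example #1 in two ways: it clears the ioc->ioc_data last-hit cache only after re-checking it under rcu_read_lock() and then under ioc->lock, and it serializes the sync-queue teardown with bfqd->eqm_lock, dropping the io_context reference held while the bic was using a shared (coop) queue. The guarded clearing matters because readers dereference ioc_data locklessly; below is a minimal sketch of such a fast-path reader, assuming the usual key check (illustrative, not the full bfq lookup).

/*
 * Hedged sketch of the lockless fast-path reader the guarded clearing
 * above protects: the last-hit cache is trusted only if its key still
 * points to this bfqd.
 */
static struct cfq_io_context *bfq_cic_cached_lookup_sketch(struct bfq_data *bfqd,
        struct io_context *ioc)
{
    struct cfq_io_context *cic;

    rcu_read_lock();
    cic = rcu_dereference(ioc->ioc_data);
    if (cic != NULL && rcu_dereference(cic->key) != bfqd)
        cic = NULL; /* Stale or dead entry; fall back to the radix tree. */
    rcu_read_unlock();

    return cic;
}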