Example #1
static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
				 int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
	multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
	mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;

	if (bio->bi_size)
		return 1;

	if (uptodate)
		multipath_end_bh_io(mp_bh, 0);
	else if (!bio_rw_ahead(bio)) {
		/*
		 * oops, IO error:
		 */
		char b[BDEVNAME_SIZE];
		md_error (mp_bh->mddev, rdev);
		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", 
		       bdevname(rdev->bdev,b), 
		       (unsigned long long)bio->bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
		multipath_end_bh_io(mp_bh, error);
	rdev_dec_pending(rdev, conf->mddev);
	return 0;
}
Example #2
/*
 * Careful, this can execute in IRQ contexts as well!
 */
static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);

	if (conf->working_disks <= 1) {
		/*
		 * Uh oh, we can do nothing if this is our last path, but
		 * first check if this is a queued request for a device
		 * which has just failed.
		 */
		printk(KERN_ALERT 
			"multipath: only one IO path left and IO error.\n");
		/* leave it active... it's all we have */
	} else {
		/*
		 * Mark disk as unusable
		 */
		if (!test_bit(Faulty, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			clear_bit(In_sync, &rdev->flags);
			set_bit(Faulty, &rdev->flags);
			mddev->sb_dirty = 1;
			conf->working_disks--;
			printk(KERN_ALERT "multipath: IO failure on %s,"
				" disabling IO path. \n	Operation continuing"
				" on %d IO paths.\n",
				bdevname (rdev->bdev,b),
				conf->working_disks);
		}
	}
}
Example #3
static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
				 sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}
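Several of these examples (this one, and the unplug_slaves variants in Examples #15 and #16) bump rdev->nr_pending before dropping the RCU read lock and later release the reference with rdev_dec_pending(). For reference, that helper is, in the md headers of this kernel generation, roughly the following inline; treat it as a sketch from memory rather than an authoritative copy:

static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);

	/* Dropping the last reference to a failed device means the
	 * personality may now remove it, so poke the recovery logic. */
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}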
Example #4
static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
				 sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
		if (rdev && !rdev->faulty) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn) {
				ret = -EOPNOTSUPP;
				break;
			}

			ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
			if (ret)
				break;
		}
	}
	return ret;
}
Example #5
/*
 * multipath_end_bh_io() is called when we have finished servicing a multipathed
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
{
	struct bio *bio = mp_bh->master_bio;
	multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);

	bio_endio(bio, bio->bi_size, err);
	mempool_free(mp_bh, conf->pool);
}
Example #6
static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
	unsigned long flags;
	mddev_t *mddev = mp_bh->mddev;
	multipath_conf_t *conf = mddev_to_conf(mddev);

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&mp_bh->retry_list, &conf->retry_list);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(mddev->thread);
}
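The entries queued onto conf->retry_list here are drained later by the array's worker thread (multipathd in this driver), which picks a fresh path and resubmits the bio. A condensed sketch of such a consumer loop, with logging and data_offset handling abridged, could look like this:

static void multipathd(mddev_t *mddev)
{
	struct multipath_bh *mp_bh;
	struct bio *bio;
	unsigned long flags;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	struct list_head *head = &conf->retry_list;

	for (;;) {
		/* Pop one queued request off the retry list. */
		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
		list_del(head->prev);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		/* Restore the original sector and try to map a working path. */
		bio = &mp_bh->bio;
		bio->bi_sector = mp_bh->master_bio->bi_sector;

		mp_bh->path = multipath_map(conf);
		if (mp_bh->path < 0) {
			/* No operational path left: fail the original request. */
			multipath_end_bh_io(mp_bh, -EIO);
		} else {
			/* Redirect the bio at the new path and resubmit it. */
			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
			bio->bi_rw |= (1 << BIO_RW_FAILFAST);
			bio->bi_end_io = multipath_end_request;
			bio->bi_private = mp_bh;
			generic_make_request(bio);
		}
	}
}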
Example #7
static void raid0_unplug(struct request_queue *q)
{
    mddev_t *mddev = q->queuedata;
    raid0_conf_t *conf = mddev_to_conf(mddev);
    mdk_rdev_t **devlist = conf->strip_zone[0].dev;
    int i;

    for (i=0; i<mddev->raid_disks; i++) {
        struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

        blk_unplug(r_queue);
    }
}
Example #8
static void multipath_status (struct seq_file *seq, mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i;
	
	seq_printf (seq, " [%d/%d] [", conf->raid_disks,
						 conf->working_disks);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf (seq, "%s",
			       conf->multipaths[i].rdev && 
			       test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
	seq_printf (seq, "]");
}
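For orientation, a status routine like this is wired into md through the driver's personality table. Assuming the 2.6-era struct mdk_personality, the multipath table looks roughly like the following sketch (hot-add/remove hooks omitted):

static struct mdk_personality multipath_personality = {
	.name		= "multipath",
	.level		= LEVEL_MULTIPATH,
	.owner		= THIS_MODULE,
	.make_request	= multipath_make_request,
	.run		= multipath_run,
	.stop		= multipath_stop,
	.status		= multipath_status,
	.error_handler	= multipath_error,
};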
Example #9
static int raid0_congested(void *data, int bits)
{
    mddev_t *mddev = data;
    raid0_conf_t *conf = mddev_to_conf(mddev);
    mdk_rdev_t **devlist = conf->strip_zone[0].dev;
    int i, ret = 0;

    for (i = 0; i < mddev->raid_disks && !ret ; i++) {
        struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

        ret |= bdi_congested(&q->backing_dev_info, bits);
    }
    return ret;
}
Example #10
static int create_strip_zones (mddev_t *mddev)
{
    int i, c, j;
    sector_t current_offset, curr_zone_offset;
    sector_t min_spacing;
    raid0_conf_t *conf = mddev_to_conf(mddev);
    mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
    struct list_head *tmp1, *tmp2;
    struct strip_zone *zone;
    int cnt;
    char b[BDEVNAME_SIZE];
 
    /*
     * The number of 'same size groups'
     */
    conf->nr_strip_zones = 0;
 
    rdev_for_each(rdev1, tmp1, mddev) {
        printk("raid0: looking at %s\n",
            bdevname(rdev1->bdev,b));
        c = 0;
        rdev_for_each(rdev2, tmp2, mddev) {
            printk("raid0:   comparing %s(%llu)",
                   bdevname(rdev1->bdev,b),
                   (unsigned long long)rdev1->size);
            printk(" with %s(%llu)\n",
                   bdevname(rdev2->bdev,b),
                   (unsigned long long)rdev2->size);
            if (rdev2 == rdev1) {
                printk("raid0:   END\n");
                break;
            }
            if (rdev2->size == rdev1->size)
            {
                /*
                 * Not unique, don't count it as a new
                 * group
                 */
                printk("raid0:   EQUAL\n");
                c = 1;
                break;
            }
            printk("raid0:   NOT EQUAL\n");
        }
Example #11
static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
			     sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		struct block_device *bdev = devlist[i]->bdev;
		request_queue_t *r_queue = bdev_get_queue(bdev);

		if (!r_queue->issue_flush_fn)
			ret = -EOPNOTSUPP;
		else
			ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
	}
	return ret;
}
Example #12
static int multipath_make_request (struct request_queue *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_endio(bio, -EIO);
		mempool_free(mp_bh, conf->pool);
		return 0;
	}
	multipath = conf->multipaths + mp_bh->path;

	mp_bh->bio = *bio;
	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
	return 0;
}
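Both this version of multipath_make_request and the older variant in Example #13 lean on multipath_map() to choose a path and to take the nr_pending reference that multipath_end_request later drops. A sketch of that selection routine, assuming the RCU-based flavour of the driver, might be:

static int multipath_map(multipath_conf_t *conf)
{
	int i, disks = conf->raid_disks;

	/* No read balancing: pick the first in-sync path and pin it
	 * with nr_pending before leaving the RCU read-side section. */
	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && test_bit(In_sync, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return i;
		}
	}
	rcu_read_unlock();

	printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
	return -1;
}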
Example #13
static int multipath_make_request (request_queue_t *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	if (bio_data_dir(bio)==WRITE) {
		disk_stat_inc(mddev->gendisk, writes);
		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
	} else {
		disk_stat_inc(mddev->gendisk, reads);
		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
	}

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_endio(bio, bio->bi_size, -EIO);
		mempool_free(mp_bh, conf->pool);
		return 0;
	}
	multipath = conf->multipaths + mp_bh->path;

	mp_bh->bio = *bio;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
	return 0;
}
Example #14
static int multipath_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks ; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
			/* Just like multipath_map, we just check the
			 * first available device
			 */
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
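A congested callback like this one (or raid0_congested in Example #9) only takes effect once it is registered on the array's queue when the personality starts. Assuming the 2.6-era backing_dev_info fields, the hookup inside the driver's run() routine is roughly the following sketch; the setup of conf, the mempool, and the worker thread is elided:

static int multipath_run(mddev_t *mddev)
{
	/* ... allocate conf, conf->multipaths, conf->pool, start the md thread ... */

	mddev->queue->unplug_fn = multipath_unplug;	/* wrapper around unplug_slaves() */
	mddev->queue->backing_dev_info.congested_fn = multipath_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	return 0;
}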
Example #15
static void unplug_slaves(mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)
		    && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}
Example #16
static void unplug_slaves(mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}
Example #17
static void unplug_slaves(mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
		if (rdev && !rdev->faulty) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			spin_unlock_irqrestore(&conf->device_lock, flags);

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			spin_lock_irqsave(&conf->device_lock, flags);
			rdev_dec_pending(rdev, mddev);
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}