/*
 * Forward a flush request to every working path.  nr_pending is raised
 * under rcu_read_lock() so the rdev cannot disappear while the RCU read
 * lock is dropped around the call into the lower-level queue.
 */
static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
				 sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}
/*
 * Completion handler for the old three-argument bio end_io API: a non-zero
 * bi_size means the bio is not yet fully completed.  Failed read-ahead is
 * simply completed with the error; a real I/O error marks the path faulty
 * and reschedules the request onto another path.
 */
static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
				 int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct multipath_bh *mp_bh = (struct multipath_bh *)(bio->bi_private);
	multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
	mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;

	if (bio->bi_size)
		return 1;

	if (uptodate)
		multipath_end_bh_io(mp_bh, 0);
	else if (!bio_rw_ahead(bio)) {
		/*
		 * oops, IO error:
		 */
		char b[BDEVNAME_SIZE];
		md_error(mp_bh->mddev, rdev);
		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
		       bdevname(rdev->bdev, b),
		       (unsigned long long)bio->bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
		multipath_end_bh_io(mp_bh, error);
	rdev_dec_pending(rdev, conf->mddev);
	return 0;
}
/*
 * Unplug the request queue of every active member device that has I/O
 * pending.  The rdev is pinned via nr_pending before the RCU read lock is
 * dropped around blk_unplug(), so it cannot be freed underneath us.
 */
static void unplug_slaves(mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)
		    && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}
/*
 * Completion handler for the single-completion bio end_io API.  Failed
 * read-ahead is completed with the error; any other failure marks the path
 * faulty and reschedules the bio onto another path.
 */
static void multipath_end_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct multipath_bh *mp_bh = bio->bi_private;
	struct mpconf *conf = mp_bh->mddev->private;
	struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;

	if (uptodate)
		multipath_end_bh_io(mp_bh, 0);
	else if (!(bio->bi_rw & REQ_RAHEAD)) {
		/*
		 * oops, IO error:
		 */
		char b[BDEVNAME_SIZE];
		md_error(mp_bh->mddev, rdev);
		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
		       bdevname(rdev->bdev, b),
		       (unsigned long long)bio->bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
		multipath_end_bh_io(mp_bh, error);
	rdev_dec_pending(rdev, conf->mddev);
}
/*
 * Unplug the request queue of every working member device that has I/O
 * pending.  nr_pending is raised before rcu_read_unlock() so the rdev stays
 * valid while the queue's unplug_fn is called, if the queue provides one.
 */
static void unplug_slaves(mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}
/*
 * Unplug the request queue of every working member device.  The walk over
 * the member list is protected by conf->device_lock rather than RCU; the
 * lock is dropped around the call into unplug_fn while nr_pending holds a
 * reference on the rdev.
 */
static void unplug_slaves(mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
		if (rdev && !rdev->faulty) {
			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			spin_unlock_irqrestore(&conf->device_lock, flags);

			if (r_queue->unplug_fn)
				r_queue->unplug_fn(r_queue);

			spin_lock_irqsave(&conf->device_lock, flags);
			rdev_dec_pending(rdev, mddev);
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}