Example #1
int dm_table_flush_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);
	int ret = 0;
	unsigned i;

	for (i = 0; i < t->num_targets; i++)
		if (t->targets[i].type->flush)
			t->targets[i].type->flush(&t->targets[i]);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);
		int err;

		if (!q->issue_flush_fn)
			err = -EOPNOTSUPP;
		else
			err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);

		if (!ret)
			ret = err;
	}

	return ret;
}
Example #2
static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
				 sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn)
				ret = -EOPNOTSUPP;
			else {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
							      error_sector);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
			}
		}
	}
	rcu_read_unlock();
	return ret;
}
Example #3
static int __init imgrement_init(void)
{
    char* err;
    struct imgrement_device *dev;

    imgrement_device = kzalloc(sizeof(struct imgrement_device), GFP_KERNEL);
    _astgo(imgrement_device != NULL, "Error allocating", err, init_error);
    dev = imgrement_device;

    dev->major = register_blkdev(0, DRIVER_NAME);
    _astgo(dev->major > 0, "Error register block device", err, init_error);

    dev->base_dev = blkdev_get_by_path("/dev/sdb", FMODE_READ, NULL);
    /* blkdev_get_by_path() returns ERR_PTR() on failure, never NULL. */
    _astgo(!IS_ERR(dev->base_dev), "Error getting base block device", err, init_error);

    dev->base_queue = bdev_get_queue(dev->base_dev);
    _astgo(dev->base_queue != NULL, "Error getting queue", err, init_error);
    dev->orig_req_fn = dev->base_queue->make_request_fn;
    dev->base_queue->make_request_fn = trace_request_fn;

    LOG("%s trace initialization succeeded", dev->base_dev->bd_disk->disk_name);
    return 0;

init_error:
    LOG_VAR(err);
    imgrement_exit();
    return -1;
}
Example #4
/**
 * bio_integrity_add_page - Attach integrity metadata
 * @bio:	bio to update
 * @page:	page containing integrity metadata
 * @len:	number of bytes of integrity metadata in page
 * @offset:	start offset within page
 *
 * Description: Attach a page containing integrity metadata to bio.
 */
int bio_integrity_add_page(struct bio *bio, struct page *page,
			   unsigned int len, unsigned int offset)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_vec *iv;

	if (bip->bip_vcnt >= bip->bip_max_vcnt) {
		printk(KERN_ERR "%s: bip_vec full\n", __func__);
		return 0;
	}

	iv = bip->bip_vec + bip->bip_vcnt;

	if (bip->bip_vcnt &&
	    bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
		return 0;

	iv->bv_page = page;
	iv->bv_len = len;
	iv->bv_offset = offset;
	bip->bip_vcnt++;

	return len;
}
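A minimal caller sketch for the interface documented above, assuming a single pre-allocated metadata page; bio_integrity_alloc()'s failure convention (NULL vs. ERR_PTR) has varied across kernel versions, so both are tolerated here, and the helper name is illustrative:

static int attach_meta_page(struct bio *bio, struct page *page,
			    unsigned int len)
{
	struct bio_integrity_payload *bip;

	/* Reserve room for one integrity vector on this bio. */
	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR_OR_NULL(bip))
		return -ENOMEM;

	/* bio_integrity_add_page() returns the attached length, 0 on failure. */
	if (bio_integrity_add_page(bio, page, len, 0) < len)
		return -ENOMEM;

	return 0;
}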
Example #5
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
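		/* fall through */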
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
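A userspace sketch that exercises the four ioctls handled above (assumes CAP_SYS_ADMIN; buffer sizes and the action mask are illustrative):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>

int trace_device(const char *devpath)
{
	struct blk_user_trace_setup buts;
	int fd, ret = -1;

	fd = open(devpath, O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return -1;

	memset(&buts, 0, sizeof(buts));
	buts.act_mask = 0xffff;		/* trace every action type */
	buts.buf_size = 512 * 1024;
	buts.buf_nr = 4;

	if (ioctl(fd, BLKTRACESETUP, &buts) == 0) {
		ioctl(fd, BLKTRACESTART);
		/* ... consume events from the debugfs relay files ... */
		ioctl(fd, BLKTRACESTOP);
		ioctl(fd, BLKTRACETEARDOWN);
		ret = 0;
	}
	close(fd);
	return ret;
}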
Example #6
static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
				 sector_t *error_sector)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	int i, ret = 0;

	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
		if (rdev && !rdev->faulty) {
			struct block_device *bdev = rdev->bdev;
			request_queue_t *r_queue = bdev_get_queue(bdev);

			if (!r_queue->issue_flush_fn) {
				ret = -EOPNOTSUPP;
				break;
			}

			ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
			if (ret)
				break;
		}
	}
	return ret;
}
Example #7
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	} else if (q->limits.discard_granularity) {
		unsigned int disc_sects = q->limits.discard_granularity >> 9;

		max_discard_sectors &= ~(disc_sects - 1);
	}
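The listing above is truncated by the source; a minimal in-kernel caller sketch for the interface as documented (helper name is illustrative):

static int discard_range(struct block_device *bdev, sector_t start,
			 sector_t nr_sects)
{
	/* Flags left at 0; BLKDEV_IFL_* masks can be passed per the kerneldoc above. */
	int ret = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);

	if (ret == -EOPNOTSUPP)
		pr_info("device does not support discard, skipping\n");
	return ret;
}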
Example #8
static struct request *get_failover_req(struct emc_handler *h,
					struct bio *bio, struct path *path)
{
	struct request *rq;
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	/* FIXME: Figure out why it fails with GFP_ATOMIC. */
	rq = blk_get_request(q, WRITE, __GFP_WAIT);
	if (!rq) {
		DMERR("dm-emc: get_failover_req: blk_get_request failed");
		return NULL;
	}

	rq->bio = rq->biotail = bio;
	blk_rq_bio_prep(q, rq, bio);

	rq->rq_disk = bdev->bd_contains->bd_disk;

	/* bio backed don't set data */
	rq->buffer = rq->data = NULL;
	/* rq data_len used for pc cmd's request_bufflen */
	rq->data_len = bio->bi_size;

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	memset(&rq->cmd, 0, BLK_MAX_CDB);

	rq->timeout = EMC_FAILOVER_TIMEOUT;
	rq->flags |= (REQ_BLOCK_PC | REQ_FAILFAST | REQ_NOMERGE);

	return rq;
}
Example #9
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct completion complete;

	bio_init(&bio);
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_idx = 0;
	bio.bi_size = PAGE_SIZE;
	bio.bi_bdev = bdev;
	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
	init_completion(&complete);
	bio.bi_private = &complete;
	bio.bi_end_io = request_complete;

	submit_bio(rw, &bio);
	generic_unplug_device(bdev_get_queue(bdev));
	wait_for_completion(&complete);
	return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
}
Example #10
static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed,
			struct path *path)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(path->dev->bdev);

	/*
	 * We can either blindly init the pg (then look at the sense),
	 * or we can send some commands to get the state here (then
	 * possibly send the fo cmnd), or we can also have the
	 * initial state passed into us and then get an update here.
	 */
	if (!q) {
		DMINFO("dm-emc: emc_pg_init: no queue");
		goto fail_path;
	}

	/* FIXME: The request should be pre-allocated. */
	rq = emc_trespass_get(hwh->context, path);
	if (!rq) {
		DMERR("dm-emc: emc_pg_init: no rq");
		goto fail_path;
	}

	DMINFO("dm-emc: emc_pg_init: sending switch-over command");
	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
	return;

fail_path:
	dm_pg_init_complete(path, MP_FAIL_PATH);
}
Example #11
/*
 * BLKRESETZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
				  GFP_KERNEL);
}
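A userspace counterpart matching the checks above: the device must be opened writable and the caller needs CAP_SYS_ADMIN (helper name is illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int reset_zone_range(const char *devpath, __u64 sector, __u64 nr_sectors)
{
	struct blk_zone_range zrange = {
		.sector = sector,
		.nr_sectors = nr_sectors,
	};
	int fd, ret;

	fd = open(devpath, O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, BLKRESETZONE, &zrange);
	close(fd);
	return ret;
}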
Example #12
static struct request *get_failover_req(struct emc_handler *h,
					struct bio *bio, struct dm_path *path)
{
	struct request *rq;
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	/* FIXME: Figure out why it fails with GFP_ATOMIC. */
	rq = blk_get_request(q, WRITE, __GFP_WAIT);
	if (!rq) {
		DMERR("get_failover_req: blk_get_request failed");
		return NULL;
	}

	blk_rq_append_bio(q, rq, bio);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	rq->timeout = EMC_FAILOVER_TIMEOUT;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;

	return rq;
}
Example #13
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	return 0;
}
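A userspace sketch of the same FITRIM path, trimming the whole filesystem that `mntpath` belongs to (requires CAP_SYS_ADMIN; the range values are illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FITRIM, struct fstrim_range */

int trim_mount(const char *mntpath)
{
	struct fstrim_range range = {
		.start = 0,
		.len = ~0ULL,	/* whole filesystem */
		.minlen = 0,
	};
	int fd, ret;

	fd = open(mntpath, O_RDONLY);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, FITRIM, &range);
	close(fd);
	/* On success, range.len reports the number of bytes trimmed. */
	return ret;
}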
Example #14
/*
 * hp_sw_get_request - Allocate an HP specific path activation request
 * @path: path on which request will be sent (needed for request queue)
 *
 * The START command is used for path activation request.
 * These arrays are controller-based failover, not LUN based.
 * One START command issued to a single path will fail over all
 * LUNs for the same controller.
 *
 * Possible optimizations
 * 1. Make timeout configurable
 * 2. Preallocate request
 */
static struct request *hp_sw_get_request(struct dm_path *path)
{
    struct request *req;
    struct block_device *bdev = path->dev->bdev;
    struct request_queue *q = bdev_get_queue(bdev);
    struct hp_sw_context *h = path->hwhcontext;

    req = blk_get_request(q, WRITE, GFP_NOIO);
    if (!req)
        goto out;

    req->timeout = 60 * HZ;

    req->errors = 0;
    req->cmd_type = REQ_TYPE_BLOCK_PC;
    req->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
    req->end_io_data = path;
    req->sense = h->sense;
    memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);

    req->cmd[0] = START_STOP;
    req->cmd[4] = 1;
    req->cmd_len = COMMAND_SIZE(req->cmd[0]);

out:
    return req;
}
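A hedged sketch of how such a request is typically issued: fire it asynchronously at the head of the queue and finish path activation in the completion callback (the callback name is illustrative, not part of the code above):

static int hp_sw_issue_request(struct dm_path *path)
{
	struct request *req = hp_sw_get_request(path);

	if (!req)
		return -ENOMEM;

	blk_execute_rq_nowait(req->q, NULL, req, 1, hp_sw_path_activated);
	return 0;
}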
Example #15
static int tbio_io(struct block_device *bdev, struct tbio_interface *uptr)
{
	tbio_interface_t inter;
	struct bio *bio = NULL;
	int reading = 0, writing = 0;
	void *buffer = NULL;
	request_queue_t *q = bdev_get_queue(Device.bdev);

	if (copy_from_user(&inter, uptr, sizeof(tbio_interface_t))) {
		printk("tbio: copy_from_user\n");
		return -EFAULT;
	}

	if (inter.data_len > (q->max_sectors << 9)) {
		printk("tbio: inter.in_len > q->max_sectors << 9\n");
		return -EIO;
	}

	if (inter.data_len) {
		switch (inter.direction) {
		default:
			return -EINVAL;
		case TBIO_TO_DEV:
			writing = 1;
			break;
		case TBIO_FROM_DEV:
			reading = 1;
			break;
		}

		bio = bio_map_user(bdev, (unsigned long)inter.data,
				   inter.data_len, reading);
		if (!bio) {
			printk("tbio: bio_map_user failed\n");
			buffer = kmalloc(inter.data_len, q->bounce_gfp | GFP_USER);
			if (!buffer) {
				printk("tbio: buffer no memory\n");
				return -1;
			}
			if (copy_from_user(buffer, inter.data, inter.data_len))
				printk("tbio: copy_from_user failed\n");
			printk("tbio: buffer %s\n", (char *)buffer);
		}
	}

	send_request(q, bio, bdev, &inter, writing);

	if (bio)
		bio_unmap_user(bio, reading);
	return 0;
}
Example #16
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
Example #17
bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return q->flush_flags & REQ_FLUSH;
}
Example #18
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
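A minimal in-kernel caller sketch for the function above: fill a range with zeroes by replicating the shared zero page, which mirrors how blkdev_issue_zeroout() of the same era used it (helper name is illustrative):

static int zero_range_with_write_same(struct block_device *bdev,
				      sector_t sector, sector_t nr_sects)
{
	/* Only meaningful if the device advertises WRITE SAME support. */
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       ZERO_PAGE(0));
}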
Example #19
static int tbio_io(struct block_device *bdev, struct tbio_interface *uptr)
{
	int ret;
	tbio_interface_t inter;
	struct bio *bio = NULL;
	int reading = 0, writing = 0;
	void *buf = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (copy_from_user(&inter, uptr, sizeof(tbio_interface_t))) {
		prk_err("copy_from_user");
		return -EFAULT;
	}

	if (inter.data_len > (q->limits.max_sectors << 9)) {
		prk_err("inter.in_len > q->max_sectors << 9");
		return -EIO;
	}

	if (inter.data_len) {

		switch (inter.direction) {
		default:
			return -EINVAL;
		case TBIO_TO_DEV:
			writing = 1;
			break;
		case TBIO_FROM_DEV:
			reading = 1;
			break;
		}

		bio = bio_map_user(q, bdev, (unsigned long)inter.data,
			inter.data_len, reading, GFP_KERNEL);

		if (!bio) {
			prk_err("bio_map_user failed");
			buf = kmalloc(inter.data_len, q->bounce_gfp | GFP_USER);
			if (!buf) {
				prk_err("buffer no memory");
				return -1;
			}
			ret = copy_from_user(buf, inter.data, inter.data_len);
			if (ret)
				prk_err("copy_from_user() failed");

			prk_info("buffer %s\n, copy_from_user returns '%d'",
				(char *)buf, ret);
		}

	}

	send_request(q, bio, bdev, &inter, writing);

	if (bio)
		bio_unmap_user(bio);
	return 0;
}
Example #20
/**
 * blkdev_nr_zones - Get number of zones
 * @bdev:	Target block device
 *
 * Description:
 *    Return the total number of zones of a zoned block device.
 *    For a regular block device, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;

	return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
}
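A hedged caller sketch relying on the documented contract that regular (non-zoned) devices report zero zones:

static void log_zone_count(struct block_device *bdev)
{
	unsigned int nr_zones = blkdev_nr_zones(bdev);

	if (!nr_zones)
		pr_info("%s: not a zoned device\n", bdev->bd_disk->disk_name);
	else
		pr_info("%s: %u zones\n", bdev->bd_disk->disk_name, nr_zones);
}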
Example #21
/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_report rep;
	struct blk_zone *zones;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);

	zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
			       GFP_KERNEL | __GFP_ZERO);
	if (!zones)
		return -ENOMEM;

	ret = blkdev_report_zones(bdev, rep.sector,
				  zones, &rep.nr_zones,
				  GFP_KERNEL);
	if (ret)
		goto out;

	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
		ret = -EFAULT;
		goto out;
	}

	if (rep.nr_zones) {
		if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
				 sizeof(struct blk_zone) * rep.nr_zones))
			ret = -EFAULT;
	}

 out:
	kvfree(zones);

	return ret;
}
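A userspace sketch of the report path above: reserve space for `nr` zone descriptors after the header and let the kernel fill them in (the caller needs CAP_SYS_ADMIN, per the check above):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

struct blk_zone_report *report_zones(int fd, __u64 sector, __u32 nr)
{
	struct blk_zone_report *rep;

	rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
	if (!rep)
		return NULL;

	rep->sector = sector;
	rep->nr_zones = nr;

	if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
		free(rep);
		return NULL;
	}
	/* rep->nr_zones now holds how many entries of rep->zones[] are valid. */
	return rep;
}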
Example #22
/*
 * trim a range of the filesystem.
 *
 * Note: the parameters passed from userspace are byte ranges into the
 * filesystem which does not match to the format we use for filesystem block
 * addressing. FSB addressing is sparse (AGNO|AGBNO), while the incoming format
 * is a linear address range. Hence we need to use DADDR based conversions and
 * comparisons for determining the correct offset and regions to trim.
 */
int
xfs_ioc_trim(
	struct xfs_mount		*mp,
	struct fstrim_range __user	*urange)
{
	struct request_queue	*q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
	unsigned int		granularity = q->limits.discard_granularity;
	struct fstrim_range	range;
	xfs_daddr_t		start, end, minlen;
	xfs_agnumber_t		start_agno, end_agno, agno;
	__uint64_t		blocks_trimmed = 0;
	int			error, last_error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (!blk_queue_discard(q))
		return -XFS_ERROR(EOPNOTSUPP);
	if (copy_from_user(&range, urange, sizeof(range)))
		return -XFS_ERROR(EFAULT);

	/*
	 * Truncating down the len isn't actually quite correct, but using
	 * BBTOB would mean we trivially get overflows for values
	 * of ULLONG_MAX or slightly lower.  And ULLONG_MAX is the default
	 * used by the fstrim application.  In the end it really doesn't
	 * matter as trimming blocks is an advisory interface.
	 */
	if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
	    range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) ||
	    range.len < mp->m_sb.sb_blocksize)
		return -XFS_ERROR(EINVAL);

	start = BTOBB(range.start);
	end = start + BTOBBT(range.len) - 1;
	minlen = BTOBB(max_t(u64, granularity, range.minlen));

	if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
		end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1;

	start_agno = xfs_daddr_to_agno(mp, start);
	end_agno = xfs_daddr_to_agno(mp, end);

	for (agno = start_agno; agno <= end_agno; agno++) {
		error = -xfs_trim_extents(mp, agno, start, end, minlen,
					  &blocks_trimmed);
		if (error)
			last_error = error;
	}

	if (last_error)
		return last_error;

	range.len = XFS_FSB_TO_B(mp, blocks_trimmed);
	if (copy_to_user(urange, &range, sizeof(range)))
		return -XFS_ERROR(EFAULT);
	return 0;
}
Example #23
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
Example #24
/**
 * Decide flush support or not.
 */
void walb_decide_flush_support(struct walb_dev *wdev)
{
	struct request_queue *q;
	const struct request_queue *lq, *dq;
	bool lq_flush, dq_flush, lq_fua, dq_fua;
	ASSERT(wdev);

	/* Get queues. */
	q = wdev->queue;
	ASSERT(q);
	lq = bdev_get_queue(wdev->ldev);
	dq = bdev_get_queue(wdev->ddev);

	/* Get flush/fua flags. */
	lq_flush = lq->flush_flags & REQ_FLUSH;
	dq_flush = dq->flush_flags & REQ_FLUSH;
	lq_fua = lq->flush_flags & REQ_FUA;
	dq_fua = dq->flush_flags & REQ_FUA;
	LOGn("flush/fua flags: log_device %d/%d data_device %d/%d\n",
		lq_flush, lq_fua, dq_flush, dq_fua);

	/* Check REQ_FLUSH/REQ_FUA supports. */
	wdev->support_flush = false;
	wdev->support_fua = false;
	if (lq_flush && dq_flush) {
		uint flush_flags = REQ_FLUSH;
		LOGn("Supports REQ_FLUSH.");
		wdev->support_flush = true;
		if (lq_fua) {
			flush_flags |= REQ_FUA;
			LOGn("Supports REQ_FUA.");
			wdev->support_fua = true;
		}
		blk_queue_flush(q, flush_flags);
		blk_queue_flush_queueable(q, true);
	} else {
		LOGw("REQ_FLUSH is not supported!\n"
			"WalB can not guarantee data consistency"
			"in sudden crashes of underlying devices.\n");
	}
}
Example #25
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;
	struct multipath *m = ti->private;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		if (m->hw_handler_name)
			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}
Example #26
int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	char *type;
	int err;
	int state = 0;

	type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL);
	if (!IS_ERR(type)) {
		if (strncmp(type, "file", 4) == 0) {
			state = 1;
			blkif->blk_backend_type = BLKIF_BACKEND_FILE;
		}
		if (strncmp(type, "phy", 3) == 0) {
			struct block_device *bdev = be->blkif->vbd.bdev;
			struct request_queue *q = bdev_get_queue(bdev);
			if (blk_queue_discard(q)) {
				err = xenbus_printf(xbt, dev->nodename,
					"discard-granularity", "%u",
					q->limits.discard_granularity);
				if (err) {
					xenbus_dev_fatal(dev, err,
						"writing discard-granularity");
					goto kfree;
				}
				err = xenbus_printf(xbt, dev->nodename,
					"discard-alignment", "%u",
					q->limits.discard_alignment);
				if (err) {
					xenbus_dev_fatal(dev, err,
						"writing discard-alignment");
					goto kfree;
				}
				state = 1;
				blkif->blk_backend_type = BLKIF_BACKEND_PHY;
			}
		}
	} else {
		err = PTR_ERR(type);
		xenbus_dev_fatal(dev, err, "reading type");
		goto out;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		xenbus_dev_fatal(dev, err, "writing feature-discard");
kfree:
	kfree(type);
out:
	return err;
}
Example #27
void dm_table_unplug_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);

		if (q->unplug_fn)
			q->unplug_fn(q);
	}
}
Example #28
void unregister_block_device(void)
{
        struct request_queue *blkdev_queue = bdev_get_queue(blkdev);

        if (blkdev_queue->request_fn != NULL &&
            original_request_fn != NULL) {
                blkdev_queue->request_fn = original_request_fn;
                printk("Successfully unregistered block device.\n");
        }
}
Example #29
/**
 * blkdev_reset_zones - Reset zones write pointer
 * @bdev:	Target block device
 * @sector:	Start sector of the first zone to reset
 * @nr_sectors:	Number of sectors, at least the length of one zone
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Reset the write pointer of the zones contained in the range
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 */
int blkdev_reset_zones(struct block_device *bdev,
		       sector_t sector, sector_t nr_sectors,
		       gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors;
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	zone_sectors = blk_queue_zone_sectors(q);
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) &&
	    end_sector != bdev->bd_part->nr_sects)
		return -EINVAL;

	blk_start_plug(&plug);
	while (sector < end_sector) {

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);

		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();

	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);

	return ret;
}
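A hedged in-kernel caller sketch: reset every zone by passing the same whole-device range the function itself validates against:

static int reset_all_zones(struct block_device *bdev)
{
	return blkdev_reset_zones(bdev, 0, bdev->bd_part->nr_sects,
				  GFP_KERNEL);
}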
Example #30
static void raid0_unplug(struct request_queue *q)
{
    mddev_t *mddev = q->queuedata;
    raid0_conf_t *conf = mddev_to_conf(mddev);
    mdk_rdev_t **devlist = conf->strip_zone[0].dev;
    int i;

    for (i=0; i<mddev->raid_disks; i++) {
        struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

        blk_unplug(r_queue);
    }
}