Example #1
static void __loop_update_dio(struct loop_device *lo, bool dio)
{
	struct file *file = lo->lo_backing_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned short sb_bsize = 0;
	unsigned dio_align = 0;
	bool use_dio;

	if (inode->i_sb->s_bdev) {
		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
		dio_align = sb_bsize - 1;
	}

	/*
	 * We support direct I/O only if lo_offset is aligned with the
	 * logical block size of the backing device, the logical block
	 * size of the loop device is not smaller than the backing
	 * device's, and the loop device needn't transform the transfer.
	 *
	 * TODO: the above condition may be relaxed in the future, and
	 * direct I/O may then be switched at runtime, because most
	 * requests in sane applications should be PAGE_SIZE aligned.
	 */
	if (dio) {
		if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
				!(lo->lo_offset & dio_align) &&
				mapping->a_ops->direct_IO &&
				!lo->transfer)
			use_dio = true;
		else
			use_dio = false;
	} else {
		use_dio = false;
	}

	if (lo->use_dio == use_dio)
		return;

	/* flush dirty pages before changing direct IO */
	vfs_fsync(file, 0);

	/*
	 * The LO_FLAGS_DIRECT_IO flag is handled like LO_FLAGS_READ_ONLY:
	 * both are set from the kernel, and losetup sees the updated
	 * value via ioctl(LOOP_GET_STATUS).
	 */
	blk_mq_freeze_queue(lo->lo_queue);
	lo->use_dio = use_dio;
	if (use_dio)
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
	else
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
	blk_mq_unfreeze_queue(lo->lo_queue);
}
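
The check that gates direct I/O above relies on a power-of-two mask: dio_align is sb_bsize - 1, so (lo->lo_offset & dio_align) is zero exactly when the offset is a multiple of the backing device's logical block size. A minimal userspace sketch of that mask test, using a hypothetical is_dio_aligned() helper (not part of the kernel code above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Returns true when offset is a multiple of the logical block size.
 * This works only because block sizes are powers of two, so
 * (bsize - 1) is a mask of the low-order bits.
 */
static bool is_dio_aligned(uint64_t offset, unsigned int bsize)
{
	return (offset & (uint64_t)(bsize - 1)) == 0;
}

int main(void)
{
	/* 4096 is a multiple of 512, 4100 is not: prints "1 0" */
	printf("%d %d\n", is_dio_aligned(4096, 512), is_dio_aligned(4100, 512));
	return 0;
}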
Example #2
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
    int i, c, err;
    sector_t curr_zone_end, sectors;
    struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
    struct strip_zone *zone;
    int cnt;
    char b[BDEVNAME_SIZE];
    char b2[BDEVNAME_SIZE];
    struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
    unsigned short blksize = 512;

    *private_conf = ERR_PTR(-ENOMEM);
    if (!conf)
        return -ENOMEM;
    rdev_for_each(rdev1, mddev) {
        pr_debug("md/raid0:%s: looking at %s\n",
                 mdname(mddev),
                 bdevname(rdev1->bdev, b));
        c = 0;

        /* round size to chunk_size */
        sectors = rdev1->sectors;
        sector_div(sectors, mddev->chunk_sectors);
        rdev1->sectors = sectors * mddev->chunk_sectors;

        blksize = max(blksize, queue_logical_block_size(
                          rdev1->bdev->bd_disk->queue));

        rdev_for_each(rdev2, mddev) {
            pr_debug("md/raid0:%s:   comparing %s(%llu)"
                     " with %s(%llu)\n",
                     mdname(mddev),
                     bdevname(rdev1->bdev,b),
                     (unsigned long long)rdev1->sectors,
                     bdevname(rdev2->bdev,b2),
                     (unsigned long long)rdev2->sectors);
            if (rdev2 == rdev1) {
                pr_debug("md/raid0:%s:   END\n",
                         mdname(mddev));
                break;
            }
            if (rdev2->sectors == rdev1->sectors) {
                /*
                 * Not unique, don't count it as a new
                 * group
                 */
                pr_debug("md/raid0:%s:   EQUAL\n",
                         mdname(mddev));
                c = 1;
                break;
            }
            pr_debug("md/raid0:%s:   NOT EQUAL\n",
                     mdname(mddev));
        }
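
The "round size to chunk_size" step above truncates each member device to a whole number of chunks: sector_div() divides in place, and multiplying back by chunk_sectors drops any partial chunk at the end of the device. A small standalone sketch of the same arithmetic, with plain 64-bit division standing in for sector_div():

#include <stdint.h>
#include <stdio.h>

/* Round a sector count down to a whole number of chunks. */
static uint64_t round_to_chunks(uint64_t sectors, uint32_t chunk_sectors)
{
	return (sectors / chunk_sectors) * chunk_sectors;
}

int main(void)
{
	/* 1000 sectors with 128-sector chunks -> 896 usable sectors */
	printf("%llu\n", (unsigned long long)round_to_chunks(1000, 128));
	return 0;
}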
Example #3
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}
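
queue_logical_block_size_show is the sysfs show routine behind /sys/block/<dev>/queue/logical_block_size; queue_var_show() simply formats the value as a decimal string. A small userspace sketch of reading it back (the device name "sda" is an assumption, substitute the disk you actually have):

#include <stdio.h>

int main(void)
{
	unsigned int lbs = 0;
	/* Assumed device name; adjust the path for your system. */
	FILE *f = fopen("/sys/block/sda/queue/logical_block_size", "r");

	if (!f) {
		perror("open logical_block_size");
		return 1;
	}
	if (fscanf(f, "%u", &lbs) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("logical block size: %u bytes\n", lbs);
	return 0;
}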
Example #4
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int uninitialized_var(special_cmd_max_sectors);

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (rw & REQ_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (rw & REQ_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
		dec_count(io, region, -EOPNOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, BIO_MAX_PAGES,
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
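
do_region sizes each bio in 512-byte sectors: to_bytes()/to_sector() shift by SECTOR_SHIFT, and dm_sector_div_up() computes how many page-sized bvecs are needed for the remaining sectors. A standalone sketch of those conversions (the names mirror the dm helpers, but these are illustrative reimplementations, not the kernel ones):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9          /* 512-byte sectors */
#define PAGE_BYTES   4096UL     /* assumed page size for the example */

static uint64_t to_bytes(uint64_t sectors) { return sectors << SECTOR_SHIFT; }
static uint64_t to_sector(uint64_t bytes)  { return bytes >> SECTOR_SHIFT; }

/* Round-up division, as used to size the bvec array per bio. */
static uint64_t sector_div_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint64_t remaining = 10000;                      /* sectors left in the region */
	uint64_t per_page  = PAGE_BYTES >> SECTOR_SHIFT; /* 8 sectors per 4 KiB page */

	printf("%llu bytes, %llu bvecs needed, %llu sectors in one page\n",
	       (unsigned long long)to_bytes(remaining),
	       (unsigned long long)sector_div_up(remaining, per_page),
	       (unsigned long long)to_sector(PAGE_BYTES));
	return 0;
}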