/**
 * blkdev_reset_zones - Reset zones write pointer
 * @bdev:	Target block device
 * @sector:	Start sector of the first zone to reset
 * @nr_sectors:	Number of sectors, at least the length of one zone
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Reset the write pointer of the zones contained in the range
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 */
int blkdev_reset_zones(struct block_device *bdev,
		       sector_t sector, sector_t nr_sectors,
		       gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors;
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	zone_sectors = blk_queue_zone_sectors(q);
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) &&
	    end_sector != bdev->bd_part->nr_sects)
		return -EINVAL;

	blk_start_plug(&plug);

	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);

		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);

	return ret;
}
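/*
 * Minimal caller sketch (illustrative, not kernel code): reset the one
 * zone containing `sector` using blkdev_reset_zones() as defined above.
 * The helper name and its rounding logic are assumptions; the mask works
 * because this era's zoned devices have power-of-two zone sizes, as the
 * alignment checks above rely on.
 */
static int reset_containing_zone(struct block_device *bdev, sector_t sector)
{
	sector_t zone_sectors = blk_queue_zone_sectors(bdev_get_queue(bdev));

	/* Align down to the start of the containing zone */
	sector &= ~(zone_sectors - 1);

	return blkdev_reset_zones(bdev, sector, zone_sectors, GFP_KERNEL);
}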
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx	*ctx;
	struct page		*ciphertext_page = NULL;
	struct bio		*bio;
	ext4_lblk_t		lblk = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t		pblk = ext4_ext_pblock(ex);
	unsigned int		len = ext4_ext_get_actual_len(ex);
	int			ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		/* Convert the filesystem block number to a 512-byte sector */
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		/* bio_add_page() returns the number of bytes added, not 0 */
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}
/**
 * blkdev_reset_zones - Reset zones write pointer
 * @bdev:	Target block device
 * @sector:	Start sector of the first zone to reset
 * @nr_sectors:	Number of sectors, at least the length of one zone
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Reset the write pointer of the zones contained in the range
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 */
int blkdev_reset_zones(struct block_device *bdev,
		       sector_t sector, sector_t nr_sectors,
		       gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors;
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio;
	int ret;

	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (end_sector > bdev->bd_part->nr_sects)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	zone_sectors = blk_queue_zone_size(q);
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) &&
	    end_sector != bdev->bd_part->nr_sects)
		return -EINVAL;

	while (sector < end_sector) {
		bio = bio_alloc(gfp_mask, 0);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);

		ret = submit_bio_wait(bio);
		bio_put(bio);

		if (ret)
			return ret;

		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	return 0;
}
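/*
 * Minimal caller sketch (illustrative, not kernel code): reset every
 * zone by passing the whole device range, which the kernel-doc above
 * states is valid provided the range contains no conventional zones.
 */
static int reset_all_zones(struct block_device *bdev)
{
	return blkdev_reset_zones(bdev, 0, bdev->bd_part->nr_sects,
				  GFP_KERNEL);
}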
static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio bio;
	struct bio_vec bio_vec;

	bio_init(&bio);
	bio.bi_max_vecs = 1;
	bio.bi_io_vec = &bio_vec;
	bio_vec.bv_page = page;
	bio_vec.bv_len = PAGE_SIZE;
	bio_vec.bv_offset = 0;
	bio.bi_vcnt = 1;
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
	bio.bi_iter.bi_size = PAGE_SIZE;

	return submit_bio_wait(rw, &bio);
}
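/*
 * Design note: the bio and its single bio_vec live on the stack, so this
 * path can never fail with -ENOMEM. That is safe only because
 * submit_bio_wait() is fully synchronous, so the bio cannot outlive the
 * stack frame. A hypothetical caller, assuming the legacy
 * submit_bio_wait(rw, bio) API used above:
 */
static int read_page_sync(struct block_device *bdev, struct page *page)
{
	return sync_request(page, bdev, READ);
}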
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	struct ext4_crypto_ctx	*ctx;
	struct page		*ciphertext_page = NULL;
	struct bio		*bio;
	ext4_lblk_t		lblk = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t		pblk = ext4_ext_pblock(ex);
	unsigned int		len = ext4_ext_get_actual_len(ex);
	int			ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

	ctx = ext4_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}
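/*
 * Worked example of the block-to-sector conversion used above (a sketch,
 * not an ext4 helper): with 4 KiB filesystem blocks, s_blocksize_bits is
 * 12, so pblk << (12 - 9) multiplies by 8, because each 4 KiB block spans
 * eight 512-byte sectors.
 */
static inline sector_t fsblk_to_sector(struct super_block *sb,
				       ext4_fsblk_t pblk)
{
	return (sector_t)pblk << (sb->s_blocksize_bits - 9);
}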
/**
 * blkdev_report_zones - Get zone information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @zones:	Array of zone structures in which to return the zone information
 * @nr_zones:	Number of zone structures in the zone array
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Get zone information starting from the zone containing @sector.
 *    The number of zones reported may be less than the number requested
 *    by @nr_zones. The number of zones actually reported is returned
 *    in @nr_zones.
 */
int blkdev_report_zones(struct block_device *bdev,
			sector_t sector,
			struct blk_zone *zones, unsigned int *nr_zones,
			gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct blk_zone_report_hdr *hdr;
	unsigned int nrz = *nr_zones;
	struct page *page;
	unsigned int nr_rep;
	size_t rep_bytes;
	unsigned int nr_pages;
	struct bio *bio;
	struct bio_vec *bv;
	unsigned int i, n, nz;
	unsigned int ofst;
	void *addr;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (!nrz)
		return 0;

	if (sector > bdev->bd_part->nr_sects) {
		*nr_zones = 0;
		return 0;
	}

	/*
	 * The zone report has a header. So make room for it in the
	 * payload. Also make sure that the report fits in a single BIO
	 * that will not be split down the stack.
	 */
	rep_bytes = sizeof(struct blk_zone_report_hdr) +
		sizeof(struct blk_zone) * nrz;
	rep_bytes = (rep_bytes + PAGE_SIZE - 1) & PAGE_MASK;
	if (rep_bytes > (queue_max_sectors(q) << 9))
		rep_bytes = queue_max_sectors(q) << 9;

	nr_pages = min_t(unsigned int, BIO_MAX_PAGES,
			 rep_bytes >> PAGE_SHIFT);
	nr_pages = min_t(unsigned int, nr_pages,
			 queue_max_segments(q));

	bio = bio_alloc(gfp_mask, nr_pages);
	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = blk_zone_start(q, sector);
	bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp_mask);
		if (!page) {
			ret = -ENOMEM;
			goto out;
		}
		if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
			__free_page(page);
			break;
		}
	}

	if (i == 0)
		ret = -ENOMEM;
	else
		ret = submit_bio_wait(bio);
	if (ret)
		goto out;

	/*
	 * Process the report result: skip the header and go through the
	 * reported zones to fix up the zone information for partitions.
	 * At the same time, return the zone information into the zone
	 * array.
	 */
	n = 0;
	nz = 0;
	nr_rep = 0;
	bio_for_each_segment_all(bv, bio, i) {
		if (!bv->bv_page)
			break;

		addr = kmap_atomic(bv->bv_page);

		/* Get header in the first page */
		ofst = 0;
		if (!nr_rep) {
			hdr = (struct blk_zone_report_hdr *) addr;
			nr_rep = hdr->nr_zones;
			ofst = sizeof(struct blk_zone_report_hdr);
		}

		/* Fixup and report zones */
		while (ofst < bv->bv_len &&
		       n < nr_rep && nz < nrz) {
			if (blkdev_report_zone(bdev, addr + ofst, &zones[nz]))
				nz++;
			ofst += sizeof(struct blk_zone);
			n++;
		}

		kunmap_atomic(addr);

		if (n >= nr_rep || nz >= nrz)
			break;
	}

out:
	bio_for_each_segment_all(bv, bio, i)
		__free_page(bv->bv_page);
	bio_put(bio);

	if (ret == 0)
		*nr_zones = nz;

	return ret;
}
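/*
 * Minimal caller sketch (illustrative, not kernel code): walk every zone
 * on a device with repeated blkdev_report_zones() calls, resuming each
 * batch after the last zone reported. REPORT_BATCH and the callback are
 * assumptions for the example.
 */
#define REPORT_BATCH	16

static int walk_zones(struct block_device *bdev,
		      void (*cb)(struct blk_zone *))
{
	struct blk_zone *zones;
	sector_t sector = 0;
	unsigned int i, nrz;
	int ret = 0;

	zones = kcalloc(REPORT_BATCH, sizeof(*zones), GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	while (sector < bdev->bd_part->nr_sects) {
		nrz = REPORT_BATCH;
		ret = blkdev_report_zones(bdev, sector, zones, &nrz,
					  GFP_KERNEL);
		if (ret || !nrz)
			break;
		for (i = 0; i < nrz; i++)
			cb(&zones[i]);
		/* Resume after the last zone reported in this batch */
		sector = zones[nrz - 1].start + zones[nrz - 1].len;
	}

	kfree(zones);
	return ret;
}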