/*
 * bvec_alloc_bs - allocate a bio_vec array from one of @bs's fixed-size
 * bvec mempools.
 * @gfp_mask:	allocation flags passed through to mempool_alloc()
 * @nr:		number of bio_vecs the caller needs (1..BIO_MAX_PAGES)
 * @idx:	out: index of the pool the vecs came from (needed to free)
 * @bs:		bio_set supplying the bvec pools
 *
 * Returns a zeroed array sized for the selected pool, or NULL if @nr is
 * out of range or the mempool allocation fails.
 */
struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
			      struct bio_set *bs)
{
	struct bio_vec *bv;

	/*
	 * Map the requested count onto the smallest pool that fits.
	 * See the comment near the bvec_array define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		/* zero, negative, or more than any pool holds */
		return NULL;
	}

	/*
	 * *idx now names the pool we want to allocate from.
	 */
	bv = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
	if (bv)
		memset(bv, 0, bvec_nr_vecs(*idx) * sizeof(*bv));

	return bv;
}
/*
 * bip_integrity_vecs - number of integrity bio_vecs this payload can hold.
 *
 * BIO_POOL_NONE in bip_slab marks a payload whose vecs live in the
 * inline array; otherwise the capacity comes from the bvec slab index.
 */
static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
{
	return (bip->bip_slab == BIO_POOL_NONE) ? BIP_INLINE_VECS
						: bvec_nr_vecs(bip->bip_slab);
}
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from
 *
 * Description:
 *   bio_alloc_bioset will first try its own mempool to satisfy the
 *   allocation.  If %__GFP_WAIT is set then we will block on the internal
 *   pool waiting for a &struct bio to become free.
 *
 *   The bio and its iovecs come from the memory pools of the given
 *   bio_set structure.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);

	if (unlikely(!bio))
		return NULL;

	bio_init(bio);

	if (nr_iovecs) {
		unsigned long idx;
		struct bio_vec *bvl;

		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
		if (unlikely(!bvl)) {
			/* no vecs to be had - give the bio back too */
			mempool_free(bio, bs->bio_pool);
			return NULL;
		}
		/* remember which pool the vecs came from, for freeing */
		bio->bi_flags |= idx << BIO_POOL_OFFSET;
		bio->bi_max_vecs = bvec_nr_vecs(idx);
		bio->bi_io_vec = bvl;
	} else {
		bio->bi_io_vec = NULL;
	}

	return bio;
}
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from. If %NULL, just use kmalloc
 *
 * Description:
 *   bio_alloc_bioset will first try its own mempool to satisfy the
 *   allocation.  If %__GFP_WAIT is set then we will block on the internal
 *   pool waiting for a &struct bio to become free. If a %NULL @bs is
 *   passed in, we will fall back to just using @kmalloc to allocate the
 *   required memory.
 *
 *   Note that the caller must set ->bi_destructor on successful return
 *   of a bio, to do the appropriate freeing of the bio once the reference
 *   count drops to zero.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	/*
	 * BIO_POOL_NONE marks "vecs not from a bvec pool" (inline vecs or
	 * no vecs at all).  Leaving idx at 0 here would stamp pool index 0
	 * into bi_flags and a later free could return inline vecs to bvec
	 * pool 0 - use the same convention the rest of this file does.
	 */
	unsigned long idx = BIO_POOL_NONE;
	struct bio_vec *bvl = NULL;
	struct bio *bio = NULL;
	void *p = NULL;

	if (bs) {
		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p)
			goto err;
		/* the bio proper lives past the bio_set's front pad */
		bio = p + bs->front_pad;
	} else {
		/*
		 * Allocate room for the inline vecs as well.
		 * NOTE(review): this assumes bi_inline_vecs is a trailing
		 * flexible array not counted in sizeof(*bio) - a bare
		 * sizeof(*bio) kmalloc would then overflow when the inline
		 * vecs below are used.  Confirm against struct bio.
		 */
		bio = kmalloc(sizeof(*bio) +
			      BIO_INLINE_VECS * sizeof(struct bio_vec),
			      gfp_mask);
		if (!bio)
			goto err;
	}

	bio_init(bio);

	if (unlikely(!nr_iovecs))
		goto out_set;

	if (nr_iovecs <= BIO_INLINE_VECS) {
		/* small requests use the vecs embedded in the bio itself */
		bvl = bio->bi_inline_vecs;
		nr_iovecs = BIO_INLINE_VECS;
	} else {
		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
		if (unlikely(!bvl))
			goto err_free;

		nr_iovecs = bvec_nr_vecs(idx);
	}
out_set:
	bio->bi_flags |= idx << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;

	return bio;

err_free:
	/* undo whichever allocation path we took above */
	if (bs)
		mempool_free(p, bs->bio_pool);
	else
		kfree(bio);
err:
	return NULL;
}
/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio:	bio to attach integrity metadata to
 * @gfp_mask:	Memory allocation mask
 * @nr_vecs:	Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata. nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	struct bio_integrity_payload *bip;
	struct bio_set *bs = bio->bi_pool;
	unsigned long idx = BIO_POOL_NONE;
	unsigned inline_vecs;

	if (!bs) {
		/*
		 * No bio_set: kmalloc the payload with room for all the
		 * vecs inline after the struct.
		 */
		bip = kmalloc(sizeof(struct bio_integrity_payload) +
			      sizeof(struct bio_vec) * nr_vecs, gfp_mask);
		inline_vecs = nr_vecs;
	} else {
		bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
		inline_vecs = BIP_INLINE_VECS;
	}

	if (unlikely(!bip))
		return NULL;

	memset(bip, 0, sizeof(*bip));

	if (nr_vecs > inline_vecs) {
		/*
		 * Only reachable with bs != NULL (otherwise inline_vecs ==
		 * nr_vecs), so dereferencing bs here is safe.
		 */
		bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
					  bs->bvec_integrity_pool);
		if (!bip->bip_vec)
			goto err;
		bip->bip_max_vcnt = bvec_nr_vecs(idx);
	} else {
		bip->bip_vec = bip->bip_inline_vecs;
		bip->bip_max_vcnt = inline_vecs;
	}

	/* idx is still BIO_POOL_NONE when the vecs are inline */
	bip->bip_slab = idx;
	bip->bip_bio = bio;
	bio->bi_integrity = bip;
	bio->bi_rw |= REQ_INTEGRITY;

	return bip;
err:
	/*
	 * Free with the allocator that was used above.  The kfree branch
	 * is unreachable today (err is only taken when nr_vecs >
	 * inline_vecs, which implies bs != NULL), but keeps this path
	 * from dereferencing a NULL bs if the logic above ever changes.
	 */
	if (bs)
		mempool_free(bip, bs->bio_integrity_pool);
	else
		kfree(bip);
	return NULL;
}
/*
 * bvec_alloc_bs - allocate an array of bio_vecs.
 *
 * NOTE(review): this definition is TRUNCATED in this chunk of the file -
 * it ends at the "} else" below and the kzalloc() fallback body described
 * in the first comment is missing.  Comments only; no code changed.
 */
struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
{
	struct bio_vec *bvl;

	/*
	 * If 'bs' is given, lookup the pool and do the mempool alloc.
	 * If not, this is a bio_kmalloc() allocation and just do a
	 * kzalloc() for the exact number of vecs right away.
	 */
	if (bs) {
		/*
		 * see comment near bvec_array define!
		 */
		switch (nr) {
		case 1:
			*idx = 0;
			break;
		case 2 ... 4:
			*idx = 1;
			break;
		case 5 ... 16:
			*idx = 2;
			break;
		case 17 ... 64:
			*idx = 3;
			break;
		case 65 ... 128:
			*idx = 4;
			break;
		case 129 ... BIO_MAX_PAGES:
			*idx = 5;
			break;
		default:
			return NULL;
		}

		/*
		 * idx now points to the pool we want to allocate from
		 */
		bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
		if (bvl)
			memset(bvl, 0,
			       bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
	} else
	/* NOTE(review): truncated here - the !bs branch body is not
	 * visible in this chunk of the source. */
/** * bio_integrity_add_page - Attach integrity metadata * @bio: bio to update * @page: page containing integrity metadata * @len: number of bytes of integrity metadata in page * @offset: start offset within page * * Description: Attach a page containing integrity metadata to bio. */ int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { struct bio_integrity_payload *bip = bio->bi_integrity; struct bio_vec *iv; if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) { printk(KERN_ERR "%s: bip_vec full\n", __func__); return 0; } iv = bip->bip_vec + bip->bip_vcnt; iv->bv_page = page; iv->bv_len = len; iv->bv_offset = offset; bip->bip_vcnt++; return len; }
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from. If %NULL, just use kmalloc
 *
 * Description:
 *   bio_alloc_bioset will first try its own mempool to satisfy the
 *   allocation.  If %__GFP_WAIT is set then we will block on the internal
 *   pool waiting for a &struct bio to become free. If a %NULL @bs is
 *   passed in, we will fall back to just using @kmalloc to allocate the
 *   required memory.
 *
 *   Note that the caller must set ->bi_destructor on successful return
 *   of a bio, to do the appropriate freeing of the bio once the reference
 *   count drops to zero.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	void *p = mempool_alloc(bs->bio_pool, gfp_mask);
	struct bio_vec *bvl = NULL;
	unsigned long idx = BIO_POOL_NONE;
	struct bio *bio;

	if (unlikely(!p))
		return NULL;

	/* the bio proper starts past the bio_set's front pad */
	bio = p + bs->front_pad;
	bio_init(bio);

	if (nr_iovecs > BIO_INLINE_VECS) {
		/* too many for the inline array - go to the bvec pools */
		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
		if (unlikely(!bvl)) {
			mempool_free(p, bs->bio_pool);
			return NULL;
		}
		nr_iovecs = bvec_nr_vecs(idx);
	} else if (nr_iovecs) {
		/* small requests use the vecs embedded in the bio */
		bvl = bio->bi_inline_vecs;
		nr_iovecs = BIO_INLINE_VECS;
	}

	/* idx is BIO_POOL_NONE unless the vecs came from a bvec pool */
	bio->bi_flags |= idx << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;

	return bio;
}