void complete_master_bio(struct drbd_conf *mdev, struct bio_and_error *m)
{
	bio_endio(m->bio, m->error);
	dec_ap_bio(mdev);
}
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_inc_stat(zram, ZRAM_STAT_NUM_READS);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t zlen;
		u32 zoffset;
		struct page *bio_page, *zpage;
		unsigned char *bio_mem, *zmem;

		bio_page = bvec->bv_page;

		if (zram_is_zero_page(zram, index)) {
			handle_zero_page(bio_page);
			index++;
			continue;
		}

		zram_find_obj(zram, index, &zpage, &zoffset);

		/* Requested page is not present in compressed area */
		if (unlikely(!zpage)) {
			pr_debug("Read before write on swap device: "
				"sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			index++;
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(!zoffset)) {
			handle_uncompressed_page(zram, bio_page, index);
			index++;
			continue;
		}

		bio_mem = kmap_atomic(bio_page, KM_USER0);
		zlen = PAGE_SIZE;

		zmem = kmap_atomic(zpage, KM_USER1) + zoffset;

		ret = lzo1x_decompress_safe(zmem, xv_get_object_size(zmem),
					bio_mem, &zlen);

		kunmap_atomic(bio_mem, KM_USER0);
		kunmap_atomic(zmem, KM_USER1);

		/* This should NEVER happen - return bio error if it does! */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			goto out;
		}

		flush_dcache_page(bio_page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_endio(bio, -EIO);
	return 0;
}
static void bio_chain_endio(struct bio *bio, int error)
{
	bio_endio(bio->bi_private, error);
	bio_put(bio);
}
static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}
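/*
 * The two bio_chain_endio() variants above are the same helper on either
 * side of the bio_endio() signature change: the error argument was dropped
 * once the completion status moved into the bio itself (bi_error, later
 * bi_status).  Below is a hedged sketch, not taken from any driver above,
 * of how code on the older two-argument API might chain a cloned child bio
 * back to its parent in the same style; the example_* names are
 * illustrative only.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_chain_endio(struct bio *child, int error)
{
	struct bio *parent = child->bi_private;

	bio_endio(parent, error);	/* propagate the child's status */
	bio_put(child);			/* drop the clone's last reference */
}

static void example_submit_chained(struct bio *parent)
{
	struct bio *child = bio_clone(parent, GFP_NOIO);

	if (!child) {
		bio_endio(parent, -ENOMEM);
		return;
	}
	child->bi_private = parent;
	child->bi_end_io = example_chain_endio;
	submit_bio(bio_data_dir(parent), child);
}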
void bnull_make_request(struct request_queue *q, struct bio *bio)
{
	bio_endio(bio, 0);
}
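/*
 * Hedged sketch (assumption, not part of the bnull snippet above): one way
 * a trivial "null" device like bnull_make_request() could be registered on
 * the pre-blk-mq API of that era, using blk_queue_make_request().  All
 * names other than bnull_make_request() are illustrative, and error
 * unwinding is kept minimal.
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

static int bnull_major;
static struct request_queue *bnull_queue;
static struct gendisk *bnull_disk;

static const struct block_device_operations bnull_fops = {
	.owner = THIS_MODULE,
};

static int __init bnull_init(void)
{
	bnull_major = register_blkdev(0, "bnull");
	if (bnull_major < 0)
		return bnull_major;

	bnull_queue = blk_alloc_queue(GFP_KERNEL);
	if (!bnull_queue)
		goto out_unregister;
	/* every bio is completed immediately by bnull_make_request() */
	blk_queue_make_request(bnull_queue, bnull_make_request);

	bnull_disk = alloc_disk(1);
	if (!bnull_disk)
		goto out_free_queue;
	bnull_disk->major = bnull_major;
	bnull_disk->first_minor = 0;
	bnull_disk->fops = &bnull_fops;
	bnull_disk->queue = bnull_queue;
	strcpy(bnull_disk->disk_name, "bnull0");
	set_capacity(bnull_disk, 0);	/* zero-sized, data sink only */
	add_disk(bnull_disk);
	return 0;

out_free_queue:
	blk_cleanup_queue(bnull_queue);
out_unregister:
	unregister_blkdev(bnull_major, "bnull");
	return -ENOMEM;
}

static void __exit bnull_exit(void)
{
	del_gendisk(bnull_disk);
	put_disk(bnull_disk);
	blk_cleanup_queue(bnull_queue);
	unregister_blkdev(bnull_major, "bnull");
}

module_init(bnull_init);
module_exit(bnull_exit);
MODULE_LICENSE("GPL");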
static int zram_read(struct zram *zram, struct bio *bio)
{
	int i;
	u32 index;
	struct bio_vec *bvec;

	if (unlikely(!zram->init_done)) {
		set_bit(BIO_UPTODATE, &bio->bi_flags);
		bio_endio(bio, 0);
		return 0;
	}

	zram_stat64_inc(zram, &zram->stats.num_reads);
	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int ret;
		size_t clen;
		struct page *page;
		struct zobj_header *zheader;
		unsigned char *user_mem, *cmem;

		page = bvec->bv_page;

		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			handle_zero_page(page);
			index++;
			continue;
		}

		/* Requested page is not present in compressed area */
		if (unlikely(!zram->table[index].page)) {
			pr_debug("Read before write: sector=%lu, size=%u",
				(ulong)(bio->bi_sector), bio->bi_size);
			/* Do nothing */
			index++;
			continue;
		}

		/* Page is stored uncompressed since it's incompressible */
		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
			handle_uncompressed_page(zram, page, index);
			index++;
			continue;
		}

		user_mem = kmap_atomic(page, KM_USER0);
		clen = PAGE_SIZE;

		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
			zram->table[index].offset;

		ret = lzo1x_decompress_safe(
			cmem + sizeof(*zheader),
			xv_get_object_size(cmem) - sizeof(*zheader),
			user_mem, &clen);

		kunmap_atomic(user_mem, KM_USER0);
		kunmap_atomic(cmem, KM_USER1);

		/* Should NEVER happen. Return bio error if it does. */
		if (unlikely(ret != LZO_E_OK)) {
			pr_err("Decompression failed! err=%d, page=%u\n",
				ret, index);
			zram_stat64_inc(zram, &zram->stats.failed_reads);
			goto out;
		}

		flush_dcache_page(page);
		index++;
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return 0;

out:
	bio_endio(bio, -EIO);
	return 0;
}
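/*
 * Hedged sketch of the zero-page fast path used by both zram_read()
 * variants above: when ZRAM_ZERO is set (or zram_is_zero_page() is true),
 * nothing was ever stored for this index, so the read is satisfied by
 * clearing the destination page instead of decompressing anything.  This
 * illustrates what a helper like handle_zero_page() typically does; it is
 * not a copy of the driver's implementation.
 */
#include <linux/highmem.h>
#include <linux/mm.h>

static void example_handle_zero_page(struct page *page)
{
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}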
static int aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct buf *buf;
	ulong flags;

	blk_queue_bounce(q, &bio);

	if (bio == NULL) {
		printk(KERN_ERR "aoe: bio is NULL\n");
		BUG();
		return 0;
	}
	d = bio->bi_bdev->bd_disk->private_data;
	if (d == NULL) {
		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	} else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	} else if (bio->bi_io_vec == NULL) {
		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	}
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: buf allocation failure\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
	buf->stime = jiffies;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	buf->bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
	buf->bv_off = buf->bv->bv_offset;

	spin_lock_irqsave(&d->lock, flags);

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_INFO "aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -ENXIO);
		return 0;
	}

	list_add_tail(&buf->bufs, &d->bufq);

	aoecmd_work(d);
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);

	return 0;
}
/*
static int blkdev_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	unsigned long long dsk_offset;

	if ((bio->bi_sector << PAGE_SHIFT) + bio->bi_size > BLKDEV_BYTES) {
		printk(KERN_ERR "bad request!bi_sector:%llu,bi_size:%u\n",
			(unsigned long)bio->bi_sector, bio->bi_size);
		bio_endio(bio, -EIO);
	}

	dsk_offset = bio->bi_sector << PAGE_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		unsigned int count_done, count_current;
		void *iovec_mem;
		void *dsk_mem;

		iovec_mem = kmap(bvec->bv_page) + bvec->bv_offset;
		count_done = 0;
		while (count_done < bvec->bv_len) {
			count_current = min(bvec->bv_len - count_done,
				(unsigned int)(PAGE_SIZE -
					(dsk_offset + count_done) % PAGE_SIZE));
			dsk_mem = radix_tree_lookup(&blkdev_data,
				(dsk_offset + count_done) % PAGE_SIZE);
			dsk_mem += (dsk_offset + count_done) % PAGE_SIZE;

			switch (bio_rw(bio)) {
			case READ:
			case READA:
				memcpy(iovec_mem + count_done, dsk_mem,
					count_current);
				break;
			case WRITE:
				memcpy(dsk_mem, iovec_mem + count_done,
					count_current);
			}
			count_done += count_current;
		}
		kunmap(bvec->bv_page);
		dsk_offset += bvec->bv_len;
	}

	bio_endio(bio, 0);
	return 0;
}
*/

static int blkdev_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	unsigned long long dsk_offset;

	if ((bio->bi_sector << 9) + bio->bi_size > BLKDEV_BYTES) {
		printk(KERN_ERR BLKDEV_DISKNAME
			": bad request: block=%llu, count=%u\n",
			(unsigned long long)bio->bi_sector, bio->bi_size);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
		bio_endio(bio, 0, -EIO);
#else
		bio_endio(bio, -EIO);
#endif
		return 0;
	}

	dsk_offset = bio->bi_sector << 9;

	bio_for_each_segment(bvec, bio, i) {
		unsigned int count_done, count_current;
		void *iovec_mem;
		void *dsk_mem;

		iovec_mem = kmap(bvec->bv_page) + bvec->bv_offset;
		count_done = 0;
		while (count_done < bvec->bv_len) {
			count_current = min(bvec->bv_len - count_done,
				(unsigned int)(PAGE_SIZE -
					((dsk_offset + count_done) & ~PAGE_MASK)));

			dsk_mem = radix_tree_lookup(&blkdev_data,
				(dsk_offset + count_done) >> PAGE_SHIFT);
			if (!dsk_mem) {
				printk(KERN_ERR BLKDEV_DISKNAME
					": search memory failed: %llu\n",
					(dsk_offset + count_done) >> PAGE_SHIFT);
				kunmap(bvec->bv_page);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
				bio_endio(bio, 0, -EIO);
#else
				bio_endio(bio, -EIO);
#endif
				return 0;
			}
			dsk_mem += (dsk_offset + count_done) & ~PAGE_MASK;

			switch (bio_rw(bio)) {
			case READ:
			case READA:
				memcpy(iovec_mem + count_done, dsk_mem,
					count_current);
				break;
			case WRITE:
				memcpy(dsk_mem, iovec_mem + count_done,
					count_current);
				break;
			default:
				printk(KERN_ERR BLKDEV_DISKNAME
					": unknown value of bio_rw: %lu\n",
					bio_rw(bio));
				kunmap(bvec->bv_page);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
				bio_endio(bio, 0, -EIO);
#else
				bio_endio(bio, -EIO);
#endif
				return 0;
			}

			count_done += count_current;
		}

		kunmap(bvec->bv_page);
		dsk_offset += bvec->bv_len;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
	bio_endio(bio, bio->bi_size, 0);
#else
	bio_endio(bio, 0);
#endif
	return 0;
}
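/*
 * Hedged sketch (an assumption, not taken from the source above):
 * blkdev_make_request() resolves every disk page through
 * radix_tree_lookup(&blkdev_data, page_index), so some init-time code must
 * insert one backing page per index.  It could look roughly like this;
 * everything except blkdev_data and BLKDEV_BYTES is illustrative, and
 * unwinding of partial allocations is left to the caller.
 */
#include <linux/radix-tree.h>
#include <linux/gfp.h>

static int __init example_blkdev_alloc_diskmem(void)
{
	unsigned long i, npages = BLKDEV_BYTES >> PAGE_SHIFT;
	void *p;
	int ret;

	for (i = 0; i < npages; i++) {
		p = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		if (!p)
			return -ENOMEM;

		/* key the tree by disk page index, matching the lookup above */
		ret = radix_tree_insert(&blkdev_data, i, p);
		if (ret) {
			free_page((unsigned long)p);
			return ret;
		}
	}
	return 0;
}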
void sm_pwrite_miss_copy_to_cache_end(struct bittern_cache *bc,
				      struct work_item *wi,
				      int err)
{
	struct bio *bio = wi->wi_original_bio;
	struct cache_block *cache_block = wi->wi_cache_block;
	enum cache_state original_state = cache_block->bcb_state;
	unsigned long cache_flags;

	M_ASSERT_FIXME(err == 0);
	M_ASSERT(bio != NULL);
	ASSERT((wi->wi_flags & WI_FLAG_BIO_CLONED) != 0);
	ASSERT(wi->wi_original_bio != NULL);
	cache_block = wi->wi_cache_block;
	ASSERT(bio != NULL);
	ASSERT(bio_is_request_single_cache_block(bio));
	ASSERT(cache_block->bcb_sector ==
	       bio_sector_to_cache_block_sector(bio));
	ASSERT(bio == wi->wi_original_bio);
	ASSERT(cache_block->bcb_state == S_CLEAN_P_WRITE_MISS_CPT_CACHE_END ||
	       cache_block->bcb_state == S_DIRTY_P_WRITE_MISS_CPT_CACHE_END);
	ASSERT(wi->wi_original_cache_block == NULL);

	BT_TRACE(BT_LEVEL_TRACE2, bc, wi, cache_block, bio, NULL,
		 "copy-to-cache-end");

	ASSERT(wi->wi_original_cache_block == NULL);
	ASSERT_CACHE_STATE(cache_block);
	ASSERT_CACHE_BLOCK(cache_block, bc);

	if (cache_block->bcb_state == S_CLEAN_P_WRITE_MISS_CPT_CACHE_END) {
		spin_lock_irqsave(&cache_block->bcb_spinlock, cache_flags);
		cache_state_transition_final(bc, cache_block, TS_NONE,
					     S_CLEAN);
		spin_unlock_irqrestore(&cache_block->bcb_spinlock,
				       cache_flags);
	} else {
		ASSERT(cache_block->bcb_state ==
		       S_DIRTY_P_WRITE_MISS_CPT_CACHE_END);
		spin_lock_irqsave(&cache_block->bcb_spinlock, cache_flags);
		cache_state_transition_final(bc, cache_block, TS_NONE,
					     S_DIRTY);
		spin_unlock_irqrestore(&cache_block->bcb_spinlock,
				       cache_flags);
	}

	cache_put_update_age(bc, cache_block, 1);

	cache_timer_add(&bc->bc_timer_writes, wi->wi_ts_started);
	cache_timer_add(&bc->bc_timer_write_misses, wi->wi_ts_started);
	if (original_state == S_CLEAN_P_WRITE_MISS_CPT_CACHE_END) {
		cache_timer_add(&bc->bc_timer_write_clean_misses,
				wi->wi_ts_started);
	} else {
		ASSERT(original_state == S_DIRTY_P_WRITE_MISS_CPT_CACHE_END);
		cache_timer_add(&bc->bc_timer_write_dirty_misses,
				wi->wi_ts_started);
	}

	work_item_free(bc, wi);

	atomic_dec(&bc->bc_pending_requests);
	if (bio_data_dir(bio) == WRITE) {
		atomic_dec(&bc->bc_pending_write_requests);
		atomic_inc(&bc->bc_completed_write_requests);
	} else {
		atomic_dec(&bc->bc_pending_read_requests);
		atomic_inc(&bc->bc_completed_read_requests);
	}
	atomic_inc(&bc->bc_completed_requests);

	/*
	 * wakeup possible waiters
	 */
	wakeup_deferred(bc);

	bio_endio(bio, 0);
}