// Dump block info for a bio by appending an sp_io record to a global linked list
void dump(struct bio *bio) {
  if (!head) {
    // initialize linked list
    tail = head = read_p = kzalloc(sizeof(struct sp_io), GFP_KERNEL); /* zeroed so ->next starts NULL */
    if (!tail) {
      // Exception handling
      printk("SP kmalloc failed!\n");
      return;
    }
  } else {
    struct sp_io *temp = kzalloc(sizeof(struct sp_io), GFP_KERNEL); /* zeroed so ->next starts NULL */
    if (!temp) {
      // Exception handling
      printk("SP kmalloc failed!\n");
      return;
    }
    tail->next = temp;
    tail = temp;
  }

  // initialize sp_io struct
  tail->sector = bio->bi_sector;
  // get device name
  bdevname(bio->bi_bdev, tail->dev_name);
  tail->count = bio_sectors(bio);
  struct timespec current_time;
  getnstimeofday(&current_time);
  tail->sec = current_time.tv_sec;
  tail->nsec = current_time.tv_nsec;
}
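The dump() snippet above appends to a global singly linked list through head, tail, and read_p, and to a struct sp_io type, none of which appear in this listing. Below is a minimal sketch of the declarations it seems to assume; the field types are inferred from how dump() uses them and are hypothetical, not taken from the original module.

#include <linux/blkdev.h>	/* sector_t, BDEVNAME_SIZE, struct bio */

/* Hypothetical record layout matching the assignments made in dump() above. */
struct sp_io {
	sector_t sector;              /* starting sector of the bio */
	unsigned int count;           /* length in sectors (bio_sectors) */
	char dev_name[BDEVNAME_SIZE]; /* device name filled in by bdevname() */
	long sec;                     /* timestamp: seconds */
	long nsec;                    /* timestamp: nanoseconds */
	struct sp_io *next;           /* next record in the list */
};

static struct sp_io *head;   /* first record */
static struct sp_io *tail;   /* last record; dump() appends here */
static struct sp_io *read_p; /* cursor for whatever drains the list */

Since the nodes are zero-allocated, a consumer can walk from head until next is NULL, advancing read_p as it goes.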
static int
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
		if (__rq) {
			BUG_ON(sector != blk_rq_pos(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	*req = __rq;
	return ret;
}
Example #3
static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	int err;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

	if (bio_size < rrpc->dev->sec_size)
		return NVM_IO_ERR;
	else if (bio_size > rrpc->dev->max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_pages = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}
Example #4
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
Example #5
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
	struct request_queue *q = pblk->dev->q;
	struct pblk_w_ctx w_ctx;
	sector_t lba = pblk_get_lba(bio);
	unsigned long start_time = jiffies;
	unsigned int bpos, pos;
	int nr_entries = pblk_get_secs(bio);
	int i, ret;

	generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
	switch (ret) {
	case NVM_IO_REQUEUE:
		io_schedule();
		goto retry;
	case NVM_IO_ERR:
		pblk_pipeline_stop(pblk);
		goto out;
	}

	if (unlikely(!bio_has_data(bio)))
		goto out;

	pblk_ppa_set_empty(&w_ctx.ppa);
	w_ctx.flags = flags;
	if (bio->bi_opf & REQ_PREFLUSH)
		w_ctx.flags |= PBLK_FLUSH_ENTRY;

	for (i = 0; i < nr_entries; i++) {
		void *data = bio_data(bio);

		w_ctx.lba = lba + i;

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
		pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);

		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	atomic64_add(nr_entries, &pblk->user_wa);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_entries, &pblk->inflight_writes);
	atomic_long_add(nr_entries, &pblk->req_writes);
#endif

	pblk_rl_inserted(&pblk->rl, nr_entries);

out:
	generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
	pblk_write_should_kick(pblk);
	return ret;
}
/**ltl
 * Purpose: split a bio request into clones
 * Parameters:
 * Return value:
 * Notes:
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;	/* starting sector of the request */

	/* this shows that the data length is always a multiple of 512 bytes */
	ci.sector_count = bio_sectors(bio); /* data length, in sectors */
	ci.idx = bio->bi_idx; /* current index into the bio_vec array */

	start_io_acct(ci.io);
	while (ci.sector_count) /* dispatch each clone request */
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
int sba_reiserfs_block_type(char *data, sector_t sector, char *type, struct bio *sba_bio)
{
	int ret = UNKNOWN_BLOCK;

	sba_debug(0, "Got block %ld\n", SBA_SECTOR_TO_BLOCK(sector));

	if (sba_reiserfs_journal_block(sba_bio, sector)) {
	
		sba_debug(0, "Block %ld is a journal block\n", SBA_SECTOR_TO_BLOCK(sector));
		
		/* FIXME: what about revoke blocks ?? */
		if (sba_reiserfs_journal_desc_block(data)) {
			strcpy(type, "J_D");
			ret = JOURNAL_DESC_BLOCK;

			/* 
			 * Find out the real block numbers from this desc block.
			 * sba_reiserfs_handle_descriptor_block() will add the 
			 * real blocknr to a hash_table so that when the checkpt
			 * write comes, it can be identified properly.
			 */

			sba_reiserfs_handle_descriptor_block(data);
		} else if (sba_reiserfs_journal_commit_block(data)) {
			strcpy(type, "J_C");
			ret = JOURNAL_COMMIT_BLOCK;
		} else if (sba_reiserfs_journal_super_block(data)) {
			#if 1
			/* 
			 * there is no separate location for journal 
			 * super block. this is just another journal 
			 * data block
			 */
			strcpy(type, "J_O");
			ret = JOURNAL_DATA_BLOCK;
			#else
				strcpy(type, "J_S");
				ret = JOURNAL_SUPER_BLOCK;
			#endif
		} else {
			strcpy(type, "J_O");
			ret = JOURNAL_DATA_BLOCK;
		}
	} else {
		sba_debug(0, "Block %ld is a non-journal block\n", SBA_SECTOR_TO_BLOCK(sector));

		ret = sba_reiserfs_non_journal_block_type(data, sector, type, bio_sectors(sba_bio)*SBA_HARDSECT);
	}

	sba_debug(0, "returning block type %d\n", ret);
	return ret;
}
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	int cpu;
	cpu = part_stat_lock();
	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
}
Example #10
static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * try last_merge to avoid going to hash
	 */
	ret = elv_try_last_merge(q, bio);
	if (ret != ELEVATOR_NO_MERGE) {
		__rq = q->last_merge;
		goto out_insert;
	}

	/*
	 * see if the merge hash can satisfy a back merge
	 */
	__rq = deadline_find_drq_hash(dd, bio->bi_sector);
	if (__rq) {
		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_BACK_MERGE;
			goto out;
		}
	}

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t rb_key = bio->bi_sector + bio_sectors(bio);

		__rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
		if (__rq) {
			BUG_ON(rb_key != rq_rb_key(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	q->last_merge = __rq;
out_insert:
	if (ret)
		deadline_hot_drq_hash(dd, RQ_DATA(__rq));
	*req = __rq;
	return ret;
}
Example #11
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
#ifndef __disk_stat_inc
	int cpu;
#endif

#ifdef __disk_stat_inc
	__disk_stat_inc(mdev->vdisk, ios[rw]);
	__disk_stat_add(mdev->vdisk, sectors[rw], bio_sectors(bio));
	disk_round_stats(mdev->vdisk);
	mdev->vdisk->in_flight++;
#else
	cpu = part_stat_lock();
	part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&mdev->vdisk->part0, rw);
	part_stat_unlock();
#endif
}
Example #12
static int multipath_make_request (request_queue_t *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	if (bio_data_dir(bio)==WRITE) {
		disk_stat_inc(mddev->gendisk, writes);
		disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
	} else {
		disk_stat_inc(mddev->gendisk, reads);
		disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
	}

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_endio(bio, bio->bi_size, -EIO);
		mempool_free(mp_bh, conf->pool);
		return 0;
	}
	multipath = conf->multipaths + mp_bh->path;

	mp_bh->bio = *bio;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
	return 0;
}
Example #13
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
{
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	uint64_t offset = BIO_BI_SECTOR(bio);
	unsigned int sectors = bio_sectors(bio);
	int rw = bio_data_dir(bio);
#ifdef HAVE_GENERIC_IO_ACCT
	unsigned long start = jiffies;
#endif
	int error = 0;

	if (bio_has_data(bio) && offset + sectors >
	    get_capacity(zv->zv_disk)) {
		printk(KERN_INFO
		    "%s: bad access: block=%llu, count=%lu\n",
		    zv->zv_disk->disk_name,
		    (long long unsigned)offset,
		    (long unsigned)sectors);
		error = SET_ERROR(EIO);
		goto out1;
	}

	generic_start_io_acct(rw, sectors, &zv->zv_disk->part0);

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			error = SET_ERROR(EROFS);
			goto out2;
		}

		if (bio->bi_rw & VDEV_REQ_DISCARD) {
			error = zvol_discard(bio);
			goto out2;
		}

		error = zvol_write(bio);
	} else
		error = zvol_read(bio);

out2:
	generic_end_io_acct(rw, &zv->zv_disk->part0, start);
out1:
	BIO_END_IO(bio, -error);
	spl_fstrans_unmark(cookie);
#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
	return (0);
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
	return (BLK_QC_T_NONE);
#endif
}
Example #14
static int
blktap_ring_make_tr_request(struct blktap *tap,
			    struct blktap_request *request,
			    struct blktap_ring_request *breq)
{
	struct bio *bio = request->rq->bio;
	unsigned int nsecs;

	breq->u.tr.nr_sectors    = nsecs = bio_sectors(bio);
	breq->u.tr.sector_number = bio->bi_sector;

	return nsecs;
}
Example #15
void __nd_iostat_start(struct bio *bio, unsigned long *start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	*start = jiffies;
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
Example #16
/**
 * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
 * @sdp: scsi device to operate one
 * @rq: Request to prepare
 *
 * Will issue either UNMAP or WRITE SAME(16) depending on preference
 * indicated by target device.
 **/
static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
{
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	struct bio *bio = rq->bio;
	sector_t sector = bio->bi_sector;
	unsigned int nr_sectors = bio_sectors(bio);
	unsigned int len;
	int ret;
	char *buf;
	struct page *page;

	if (sdkp->device->sector_size == 4096) {
Example #17
static int
vr_merge(struct request_queue *q, struct request **rqp, struct bio *bio)
{
	sector_t sector = bio->bi_sector + bio_sectors(bio);
	struct vr_data *vd = vr_get_data(q);
	struct request *rq = elv_rb_find(&vd->sort_list, sector);

	if (rq && elv_rq_merge_ok(rq, bio)) {
		*rqp = rq;
		return ELEVATOR_FRONT_MERGE;
	}
	return ELEVATOR_NO_MERGE;
}
Example #18
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}
static struct request *
fiops_find_rq_fmerge(struct fiops_data *fiopsd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct fiops_ioc *cic;

	cic = fiops_cic_lookup(fiopsd, tsk->io_context);

	if (cic) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cic->sort_list, sector);
	}

	return NULL;
}
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}
Example #21
static void my_block_requeue(struct request_queue *q, struct request *rq)
{
	seq++;
	diff_sec = current_time.tv_sec - init_time.tv_sec;
	diff_nsec = current_time.tv_nsec - init_time.tv_nsec;

	bio = rq->bio;
	if (bio == NULL) {
		printk("%d,%d %d %ld %ld.%ld 'R'    \n", MAJOR(dev), MINOR(dev),
		       rq->cpu, seq, diff_sec, diff_nsec);
	} else {
		RW = (bio_data_dir(bio) == WRITE) ? 'W' : 'R'; /* data direction */
		F = 'R'; /* requeue event */
		sector = bio->bi_sector;   /* starting sector of the bio */
		size = bio_sectors(bio);   /* length in sectors */
		printk("%d,%d %d %ld %ld.%ld %c  %c %lld+%d \n", MAJOR(dev), MINOR(dev),
		       rq->cpu, seq, diff_sec, diff_nsec, F, RW, sector, size);
	}
}
Example #22
static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
	if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
		t->nr_seq_samples++;
	else {
		/*
		 * Just one non-sequential IO is enough to reset the
		 * counters.
		 */
		if (t->nr_seq_samples) {
			t->nr_seq_samples = 0;
			t->nr_rand_samples = 0;
		}

		t->nr_rand_samples++;
	}

	t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
}
Example #23
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}
static int multipath_make_request (struct request_queue *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	multipath_conf_t *conf = mddev_to_conf(mddev);
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_endio(bio, -EIO);
		mempool_free(mp_bh, conf->pool);
		return 0;
	}
	multipath = conf->multipaths + mp_bh->path;

	mp_bh->bio = *bio;
	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
	return 0;
}
int sba_jfs_block_type(char *data, sector_t sector, char *type, struct bio *sba_bio)
{
	int ret = UNKNOWN_BLOCK;

	sba_debug(0, "Got block %ld\n", SBA_SECTOR_TO_BLOCK(sector));

	if (sba_jfs_journal_block(sba_bio, sector)) {
		sba_debug(0, "Block %ld is a journal block\n", SBA_SECTOR_TO_BLOCK(sector));
		
		ret = sba_jfs_handle_journal_block(sector, data);
	}
	else {
		sba_debug(0, "Block %ld is a non-journal block\n", SBA_SECTOR_TO_BLOCK(sector));

		ret = sba_jfs_non_journal_block_type(data, sector, type, bio_sectors(sba_bio)*SBA_HARDSECT);
	}

	sba_debug(0, "returning block type %d\n", ret);
	return ret;
}
Example #26
static int tbio_transfer(struct request *req, struct tbio_device *dev)
{
	unsigned int i = 0, offset = 0;
	char *buf;
	unsigned long flags;
	size_t size;

	struct bio_vec *bv;
	struct req_iterator iter;

	size = blk_rq_cur_bytes(req);
	prk_info("bio req of size %zu:", size);
	offset = blk_rq_pos(req) * 512;

	rq_for_each_segment(bv, req, iter) {
		size = bv->bv_len;
		prk_info("%s bio(%u), segs(%u) sect(%u) pos(%lu) off(%u)",
			(bio_data_dir(iter.bio) == READ) ? "READ" : "WRITE",
			i, bio_segments(iter.bio), bio_sectors(iter.bio),
			iter.bio->bi_sector, offset);

		if (get_capacity(req->rq_disk) * 512 < offset) {
			prk_info("Error, small capacity %zu, offset %u",
				get_capacity(req->rq_disk) * 512,
				offset);
			continue;
		}

		buf = bvec_kmap_irq(bv, &flags);
		if (bio_data_dir(iter.bio) == WRITE)
			memcpy(dev->data + offset, buf, size);
		else
			memcpy(buf, dev->data + offset, size);
		offset += size;
		flush_kernel_dcache_page(bv->bv_page);
		bvec_kunmap_irq(buf, &flags);
		++i;
	}
Example #27
static void my_block_fun(struct task_struct *task, struct bio *bio)
{
	getrawmonotonic(&current_time);
	if (strcmp(disk->disk_name, bio->bi_bdev->bd_disk->disk_name) == 0) {
		seq++;
		RW = (bio_data_dir(bio) == WRITE) ? 'W' : 'R'; /* data direction */
		F = (task->bio_list) ? 'M' : 'I'; /* 'M' if the task has a plugged bio list */
		sector = bio->bi_sector;   /* starting sector of the bio */
		size = bio_sectors(bio);   /* length in sectors */
		cpu = task_thread_info(task)->cpu;
		diff_sec = current_time.tv_sec - init_time.tv_sec;
		diff_nsec = current_time.tv_nsec - init_time.tv_nsec;
		printk("%d,%d %d %ld %ld.%ld %d %c %c %lld+%d %s\n", MAJOR(dev), MINOR(dev),
		       cpu, seq, diff_sec, diff_nsec, task->pid, F, RW, sector, size, task->comm);
	}
	if ((current_time.tv_sec - init_time.tv_sec) > timer) {
		block_fun = NULL;
		block_requeue = NULL;
		block_comp = NULL;
	}
}
int sba_print_bio(struct bio *sba_bio)
{
	int i;
	struct bio_vec *bvl;

	if ((bio_data_dir(sba_bio) == READ) || (bio_data_dir(sba_bio) == READA) || (bio_data_dir(sba_bio) == READ_SYNC)) {
		sba_debug(1, "READ block %ld size = %d sectors\n", SBA_SECTOR_TO_BLOCK(sba_bio->bi_sector), bio_sectors(sba_bio));

		//access each page of data
		bio_for_each_segment(bvl, sba_bio, i) {
			sba_debug(0, "READ: Page vir addrs %0x\n", (int)(bio_iovec_idx(sba_bio, i)->bv_page));
		}
Example #29
File: zvol.c Project: alek-p/zfs
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
{
	uio_t uio;
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	int rw = bio_data_dir(bio);
#ifdef HAVE_GENERIC_IO_ACCT
	unsigned long start = jiffies;
#endif
	int error = 0;

	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio.uio_skip = BIO_BI_SKIP(bio);
	uio.uio_resid = BIO_BI_SIZE(bio);
	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio.uio_loffset = BIO_BI_SECTOR(bio) << 9;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = UIO_BVEC;

	if (bio_has_data(bio) && uio.uio_loffset + uio.uio_resid >
	    zv->zv_volsize) {
		printk(KERN_INFO
		    "%s: bad access: offset=%llu, size=%lu\n",
		    zv->zv_disk->disk_name,
		    (long long unsigned)uio.uio_loffset,
		    (long unsigned)uio.uio_resid);
		error = SET_ERROR(EIO);
		goto out1;
	}

	generic_start_io_acct(rw, bio_sectors(bio), &zv->zv_disk->part0);

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			error = SET_ERROR(EROFS);
			goto out2;
		}

		if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
			error = zvol_discard(bio);
			goto out2;
		}

		/*
		 * Some requests are just for flush and nothing else.
		 */
		if (uio.uio_resid == 0) {
			if (bio_is_flush(bio))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);
			goto out2;
		}

		error = zvol_write(zv, &uio,
		    bio_is_flush(bio) || bio_is_fua(bio) ||
		    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
	} else
		error = zvol_read(zv, &uio);

out2:
	generic_end_io_acct(rw, &zv->zv_disk->part0, start);
out1:
	BIO_END_IO(bio, -error);
	spl_fstrans_unmark(cookie);
#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
	return (0);
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
	return (BLK_QC_T_NONE);
#endif
}
Example #30
/* the declaration varies by kernel version; the leading #if branches were cut
 * from this snippet and are reconstructed here to match the return
 * conventions used in the body below */
#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
blk_qc_t iostash_mkrequest(struct request_queue *q, struct bio *bio)
#elif LINUX_VERSION_CODE <= KERNEL_VERSION(3,1,0)
int iostash_mkrequest(struct request_queue *q, struct bio *bio)
#else
void iostash_mkrequest(struct request_queue *q, struct bio *bio)
#endif
{
	struct hdd_info *hdd;
	struct ssd_info *ssd;
	struct iostash_bio *io;
        sce_fmap_t fmap;
	uint32_t nr_sctr;
	sector_t psn;
	make_request_fn *org_mapreq = NULL;
#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
	blk_qc_t ret = BLK_QC_T_NONE;
#endif

	DBG("Got bio=%p bio->bi_rw(%lu) request at s=%lu l=%u.\n",
		bio, bio->bi_rw, BIO_SECTOR(bio), bio_sectors(bio));

	rcu_read_lock();
	hdd = hdd_search(bio);
	if (hdd) {
		atomic_inc(&hdd->nr_ref);
		org_mapreq = hdd->org_mapreq;
	}
	rcu_read_unlock();

	if (unlikely(NULL == hdd)) {
		/* have to requeue the request, somebody was holding a
		 * dangling reference */
		ERR("Request holding a dangling make_request_fn pointer\n.");

#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
		bio->bi_error = -EAGAIN;
		return ret;
#elif LINUX_VERSION_CODE <= KERNEL_VERSION(3,1,0)
		rmb();		/* read the change in make_request_fn */
		return -EAGAIN; /* retry */
#else
		/* no retry possible in newer kernels since the return
		 * of make_request_fn is no longer checked and retried
		 * if not zero, we cannot unload the module */
		BUG();
		return;
#endif
	}

	if (!hdd->online) {
		ERR("request re-routed due to hdd not being online.\n");
		/* being unloaded, re-route */
		goto out;
	}

	hdd->request_q = q;
	/* calculate physical sector number -- offset partition information */
	psn = BIO_SECTOR(bio) + bio->bi_bdev->bd_part->start_sect;
	nr_sctr = to_sector(BIO_SIZE(bio));
	do {
		if (bio_sectors(bio) == 0)
			break;

		/* partition boundary check */
		if ((psn < hdd->part_start) ||
			((psn + nr_sctr) > hdd->part_end))
			break;

		if (bio_data_dir(bio) == WRITE) {
			gctx.st_write++;

#ifdef SCE_AWT
			/* make sure the request is only for one fragment */
			if (((psn + nr_sctr - 1) / SCE_SCTRPERFRAG) !=
				(psn / SCE_SCTRPERFRAG)) {
				sce_invalidate(hdd->lun, psn, nr_sctr);
				break;
			}
			rcu_read_lock();
			if (sce_get4write(hdd->lun, psn, nr_sctr, &fmap) 
				== SCE_SUCCESS) {
				ssd = (struct ssd_info *)fmap.cdevctx;
				atomic_inc(&ssd->nr_ref);
				rcu_read_unlock();
				if (!ssd->online) {
					sce_put4write(hdd->lun, psn,
						nr_sctr, 1);
					atomic_dec(&ssd->nr_ref);
				} else {
					io = _io_alloc(hdd, ssd, fmap.fragnum, bio, psn);
					if (NULL == io) {
						atomic_dec(&ssd->nr_ref);
						break;
					}
#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
					ret = _io_worker_run(&io->work);
#else
					_io_queue(io);
#endif
					/* lose the reference to hdd, not needed anymore */
					atomic_dec(&hdd->nr_ref);
#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
					return ret;
#elif LINUX_VERSION_CODE <= KERNEL_VERSION(3,1,0)
					return 0;
#else
					return;
#endif
				}
			} else
				rcu_read_unlock();
#else
			sce_invalidate(hdd->lun, psn, nr_sctr);
#endif
			break;
		}
		else
		{
			/* Read handling */
			gctx.st_read++;

			/* make sure the request is only for one fragment */
			if (((psn + nr_sctr - 1) / SCE_SCTRPERFRAG) !=
				(psn / SCE_SCTRPERFRAG))
				break;

			/* cache hit/miss check */
			rcu_read_lock();
			if (sce_get4read(hdd->lun, psn, nr_sctr, &fmap) != SCE_SUCCESS) {
				rcu_read_unlock();
				break;
			}
			BUG_ON(NULL == fmap.cdevctx);
			ssd = (struct ssd_info *) fmap.cdevctx;
			atomic_inc(&ssd->nr_ref);
			rcu_read_unlock();
			/* make sure the request is within the SSD limits and the SSD is online */
			if (!ssd->online || ssd->queue_max_hw_sectors < nr_sctr) {
				sce_put4read(hdd->lun, psn, nr_sctr);
				atomic_dec(&ssd->nr_ref);
				break;
			}

			/* cache hit */
			io = _io_alloc(hdd, ssd, fmap.fragnum, bio, psn);
			if (NULL == io) {
				atomic_dec(&ssd->nr_ref);
				break;
			}

#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
			ret = _io_worker_run(&io->work);
#else
			_io_queue(io);
#endif
			/* lose the reference to hdd , not needed anymore */
			atomic_dec(&hdd->nr_ref);
		}

#if KERNEL_VERSION(4,4,0) <= LINUX_VERSION_CODE
		return ret;
#elif LINUX_VERSION_CODE <= KERNEL_VERSION(3,1,0)
		return 0;
#else
		return;
#endif
	} while (0);

out:
	/* lose the reference to hdd , not needed anymore */
	atomic_dec(&hdd->nr_ref);

	return (org_mapreq) (q, bio);
}