Example #1
/*
 * Transfer a single BIO.
 */
static int sbull_xfer_bio(struct sbull_dev *dev, struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	sector_t sector = bio->bi_sector;
	
	bool do_sync;
	int do_sync_req = 0;

	Nand_OS_LOCK();	
	/* Do each segment independently. */
	bio_for_each_segment(bvec, bio, i)	// this is a macro definition
	{
		char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);//luowl
		
#if 1
		/* Count segments that belong to a synchronous write. */
		if (1) // (nand_page_size_get() == 2048)
		{
			do_sync = (bio_rw_flagged(bio, BIO_RW_SYNCIO) && bio_data_dir(bio) == WRITE);
			if (do_sync) {
				//printk("detect do write sync\n");
				do_sync_req++;
			}
		}
#endif

		
		sbull_transfer(dev, sector, bio_cur_bytes(bio) >> 9,
				buffer, bio_data_dir(bio) == WRITE);
		sector += bio_cur_bytes(bio) >> 9;
		__bio_kunmap_atomic(buffer, KM_USER0);	/* unmap the mapped address, not the bio */
	}
	Nand_OS_UNLOCK();	/* assumed counterpart of Nand_OS_LOCK(); the original snippet is truncated here */
	return 0;		/* Always "succeed" */
}
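
The synchronous-write check above, and the driver functions that follow, all hinge on bio_rw_flagged(). For reference, here is a minimal sketch of that helper as it appeared in kernels of this era (roughly 2.6.32, paraphrased from include/linux/bio.h; flag order and the omitted entries are approximate, so consult the bio.h of the kernel actually in use):

/*
 * Sketch only: bio_rw_flagged() tests one of the BIO_RW_* request flags
 * against bio->bi_rw.
 */
enum bio_rw_flags {
	BIO_RW,
	BIO_RW_FAILFAST_DEV,
	BIO_RW_FAILFAST_TRANSPORT,
	BIO_RW_FAILFAST_DRIVER,
	BIO_RW_AHEAD,		/* read-ahead: safe to fail early */
	BIO_RW_BARRIER,		/* barrier request */
	BIO_RW_SYNCIO,		/* synchronous I/O */
	/* ... further flags omitted ... */
};

static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
	return (bio->bi_rw & (1 << flag)) != 0;
}
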
Example #2
static void multipath_end_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct multipath_bh *mp_bh = bio->bi_private;
	struct mpconf *conf = mp_bh->mddev->private;
	struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;

	if (uptodate)
		multipath_end_bh_io(mp_bh, 0);
	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
		/*
		 * oops, IO error:
		 */
		char b[BDEVNAME_SIZE];
		md_error (mp_bh->mddev, rdev);
		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", 
		       bdevname(rdev->bdev,b), 
		       (unsigned long long)bio->bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
		multipath_end_bh_io(mp_bh, error);
	rdev_dec_pending(rdev, conf->mddev);
}
Example #3
static int
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct buf *buf;
	ulong flags;

	blk_queue_bounce(q, &bio);

	if (bio == NULL) {
		printk(KERN_ERR "aoe: bio is NULL\n");
		BUG();
		return 0;
	}
	d = bio->bi_bdev->bd_disk->private_data;
	if (d == NULL) {
		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	} else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	} else if (bio->bi_io_vec == NULL) {
		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	}
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: buf allocation failure\n");
		bio_endio(bio, -ENOMEM);
		return 0;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
	buf->stime = jiffies;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	buf->bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = buf->bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
	buf->bv_off = buf->bv->bv_offset;

	spin_lock_irqsave(&d->lock, flags);

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_INFO "aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -ENXIO);
		return 0;
	}

	list_add_tail(&buf->bufs, &d->bufq);

	aoecmd_work(d);
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);

	return 0;
}
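
aoeblk_make_request() above handles each bio directly instead of going through the kernel's request queue. For context, a minimal sketch of how a driver of this generation typically registers such a handler; my_make_request and my_setup_queue are illustrative names, not taken from the examples above, and the sketch assumes the ~2.6.32 block API:

#include <linux/blkdev.h>

static int my_make_request(struct request_queue *q, struct bio *bio);

/*
 * Allocate a queue and install the bio handler, bypassing the normal
 * request-based path (sketch only).
 */
static struct request_queue *my_setup_queue(void)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (q == NULL)
		return NULL;
	blk_queue_make_request(q, my_make_request);
	return q;
}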