Example #1
/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
			sizeof(r), &r);
}
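
The remap payload is packed big-endian before it is handed to __blk_add_trace(), so a consumer has to convert it back before splitting out the device numbers. A minimal decoding sketch, assuming kernel context; the print_remap() helper is hypothetical, but the struct fields are the ones populated above:

/* Hypothetical helper: decode the payload built by blk_add_trace_rq_remap(). */
static void print_remap(const struct blk_io_trace_remap *r)
{
	dev_t from = be32_to_cpu(r->device_from);
	dev_t to = be32_to_cpu(r->device_to);

	pr_info("remap %u:%u -> %u:%u at source sector %llu\n",
		MAJOR(from), MINOR(from), MAJOR(to), MINOR(to),
		(unsigned long long)be64_to_cpu(r->sector_from));
}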
Example #2
File: zvol.c Project: avg-I/zfs
static void
zvol_discard(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t start = blk_rq_pos(req) << 9;
    uint64_t end = start + blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (end > zv->zv_volsize) {
        error = EIO;
        goto out;
    }

    /*
     * Align the request to volume block boundaries. If we don't,
     * then this will force dnode_free_range() to zero out the
     * unaligned parts, which is slow (read-modify-write) and
     * useless since we are not freeing any space by doing so.
     */
    start = P2ROUNDUP(start, zv->zv_volblocksize);
    end = P2ALIGN(end, zv->zv_volblocksize);

    if (start >= end) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end-start);

    /*
     * TODO: maybe we should add the operation to the log.
     */

    zfs_range_unlock(rl);
out:
    blk_end_request(req, -error, blk_rq_bytes(req));
    spl_fstrans_unmark(cookie);
}
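
P2ROUNDUP() and P2ALIGN() come from the ZFS sysmacros and can only shrink the discard range, never grow it. A standalone userspace sketch with equivalent power-of-two macros (assumed here rather than taken from the project headers) shows the effect against an 8 KiB zv_volblocksize:

#include <stdint.h>
#include <stdio.h>

/* Equivalent helpers; align must be a power of two. */
#define P2ROUNDUP(x, align)	(-(-(uint64_t)(x) & -(uint64_t)(align)))
#define P2ALIGN(x, align)	((uint64_t)(x) & -(uint64_t)(align))

int main(void)
{
	uint64_t volblocksize = 8192;
	uint64_t start = 4096, end = 20480;	/* unaligned 16 KiB discard */

	start = P2ROUNDUP(start, volblocksize);	/* 4096  -> 8192  */
	end = P2ALIGN(end, volblocksize);	/* 20480 -> 16384 */

	/* Only the fully covered block [8192, 16384) is freed. */
	printf("freeing [%llu, %llu)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}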
Example #3
static void
vr_add_rq_rb(struct vr_data *vd, struct request *rq)
{
	elv_rb_add(&vd->sort_list, rq);

	if (blk_rq_pos(rq) >= vd->last_sector) {
		if (!vd->next_rq || blk_rq_pos(vd->next_rq) > blk_rq_pos(rq))
			vd->next_rq = rq;
	} else {
		if (!vd->prev_rq || blk_rq_pos(vd->prev_rq) < blk_rq_pos(rq))
			vd->prev_rq = rq;
	}

	BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq);
	BUG_ON(vd->next_rq && vd->prev_rq && blk_rq_pos(vd->next_rq) < blk_rq_pos(vd->prev_rq));
}
Example #4
/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (!blk_fs_request(req)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}
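
do_blkif_request() peeks before starting a request so that one which cannot be queued to the shared ring can simply stay on the queue and be retried later. Where that flexibility is not needed, the peek/start pair collapses into blk_fetch_request(), which most of the simpler drivers below use; a sketch of the equivalence (the helper name is illustrative):

/* Illustrative: blk_fetch_request() behaves like peek + start in one step. */
static struct request *fetch_one(struct request_queue *q)
{
	struct request *req = blk_peek_request(q);

	if (req)
		blk_start_request(req);	/* dequeue; starts timeout handling */
	return req;
}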
Example #5
File: zvol.c Project: torn5/zfs
static void
zvol_discard(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;

	if (offset + size > zv->zv_volsize) {
		blk_end_request(req, -EIO, size);
		goto out;
	}

	if (size == 0) {
		blk_end_request(req, 0, size);
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);

	/*
	 * TODO: maybe we should add the operation to the log.
	 */

	zfs_range_unlock(rl);

	blk_end_request(req, -error, size);
out:
	current->flags &= ~PF_NOFS;
}
Example #6
File: vbd.c Project: truncs/vbd
/*
 * Service each request in the queue. If the request
 * is not a REQ_TYPE_FS type then just skip the request
 * notifying that it is skipping this request.
 */
static void vbd_request(struct request_queue * q) {
  struct request *req;
  req = blk_fetch_request(q);

  while(req != NULL) {

	/* Skip and fail any request that is not a filesystem request */
	if (req->cmd_type != REQ_TYPE_FS) {
	  printk(KERN_NOTICE "Skip non fs type request\n");
	  __blk_end_request_all(req, -EIO);
	  req = blk_fetch_request(q);
	  continue;
	}

	vbd_tx(&device, blk_rq_pos(req), blk_rq_cur_sectors(req),
	       req->buffer, rq_data_dir(req));
	if(!__blk_end_request_cur(req, 0))
	  req = blk_fetch_request(q);
  }
}
Example #7
/*
 * Performs a read request.
 */
static int sd_read_request(struct sd_host *host, struct request *req)
{
	int i;
	unsigned long nr_blocks; /* in card blocks */
	size_t block_len; /* in bytes */
	unsigned long start;
	void *buf = req->buffer;
	int retval;

	/*
	 * It seems that some cards do not accept single block reads for the
	 * read block length reported by the card.
	 * For now, we perform only 512 byte single block reads.
	 */

	start = blk_rq_pos(req) << KERNEL_SECTOR_SHIFT;
#if 0
	nr_blocks = req->current_nr_sectors >>
			 (host->card.csd.read_blkbits - KERNEL_SECTOR_SHIFT);
	block_len = 1 << host->card.csd.read_blkbits;
#else
	nr_blocks = blk_rq_cur_sectors(req);
	block_len = 1 << KERNEL_SECTOR_SHIFT;
#endif

	for (i = 0; i < nr_blocks; i++) {
		retval = sd_read_single_block(host, start, buf, block_len);
		if (retval < 0)
			break;

		start += block_len;
		buf += block_len;
	}

	/* number of kernel sectors transferred */
#if 0
	retval = i << (host->card.csd.read_blkbits - KERNEL_SECTOR_SHIFT);
#else
	retval = i;
#endif

	return retval;
}
Example #8
static void my_request(struct request_queue *q)
{
	struct request *rq;
	int size, res = 0;
	char *ptr;
	unsigned nr_sectors, sector;
	printk(KERN_INFO "entering request routine\n");

	rq = blk_fetch_request(q);
	while (rq) {
		if (!blk_fs_request(rq)) {
			printk(KERN_WARNING
			       "This was not a normal fs request, skipping\n");
			res = -EIO;
			goto done;
		}
		nr_sectors = blk_rq_cur_sectors(rq);
		sector = blk_rq_pos(rq);

		ptr = ramdisk + sector * sector_size;
		size = nr_sectors * sector_size;

		if ((ptr + size) > (ramdisk + disk_size)) {
			printk(KERN_WARNING
			       " tried to go past end of device\n");
			res = -EIO;
			goto done;
		}

		if (rq_data_dir(rq)) {
			printk(KERN_INFO "writing at sector %u, %u sectors\n",
			       sector, nr_sectors);
			memcpy(ptr, rq->buffer, size);
		} else {
			printk(KERN_INFO "reading at sector %u, %u sectors\n",
			       sector, nr_sectors);
			memcpy(rq->buffer, ptr, size);
		}
	      done:
		if (!__blk_end_request_cur(rq, res))
			rq = blk_fetch_request(q);
	}
	printk(KERN_INFO "leaving request\n");
}
Example #9
/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		dm_dispatch_clone_request(clone, rq);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O with an error */
		dm_kill_unmapped_request(rq, -EIO);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}
Example #10
static void looper_request(struct request_queue *q) {
 
  struct request *req;
 
  printk(KERN_INFO "looper: executing request");
   
  req = blk_fetch_request(q);
  while (req != NULL) {
    /* Skip and fail any request that is not a filesystem request */
    if (req->cmd_type != REQ_TYPE_FS) {
      printk (KERN_NOTICE "Skip non-fs request\n");
      __blk_end_request_all(req, -EIO);
      req = blk_fetch_request(q);
      continue;
    }
    looper_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
		 req->buffer, rq_data_dir(req));
    if ( ! __blk_end_request_cur(req, 0) ) {
      req = blk_fetch_request(q);
    }
  }
}
Example #11
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;
		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			__blk_end_request_cur(req, -EIO);
			continue;
		}
    //    	printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
    //    			dev - Devices, rq_data_dir(req),
    //    			req->sector, req->current_nr_sectors,
    //    			req->flags);
		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
				req->buffer, rq_data_dir(req));
		__blk_end_request_cur(req, 0);
	}
}
Example #12
static void sb_request(struct request_queue *q) {
	struct request *req;
	int error;

	req = blk_fetch_request(q);
	while (req != NULL) {
		/* Skip and fail anything that is not a filesystem request */
		if (req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(req, -EIO);
			req = blk_fetch_request(q);
			continue;
		}
		/* Do transfer */
		error = sb_transfer(sbd, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));
		if (!__blk_end_request_cur(req, error ? -EIO : 0) ) {
			req = blk_fetch_request(q);
		}
	}

	return;
}
Example #13
/*
 * Simply used for requesting a transfer (read or write) of
 * data from the RAM disk.
 */
static void osurd_request(struct request_queue *q)
{
    struct request *req;
    req = blk_fetch_request(q);

    while(req != NULL) {
        struct osurd_dev *dev = req->rq_disk->private_data;
        if (req->cmd_type != REQ_TYPE_FS) {
            printk(KERN_NOTICE "Skip non-fs request\n");
            __blk_end_request_all(req, -EIO);
            req = blk_fetch_request(q);
            continue;
        }
        osurd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
                       req->buffer, rq_data_dir(req));

        if (!__blk_end_request_cur(req, 0)) {
            req = blk_fetch_request(q);
        }
    }
}
Example #14
static void sbd_request(struct request_queue *q) {
    struct request *req;

    req = blk_fetch_request(q);
    while (req != NULL) {
        // blk_fs_request() was removed in 2.6.36 - many thanks to
        // Christian Paro for the heads up and fix...
        //if (!blk_fs_request(req)) {
        if (req->cmd_type != REQ_TYPE_FS) {
            printk (KERN_NOTICE "Skip non-CMD request\n");
            __blk_end_request_all(req, -EIO);
            req = blk_fetch_request(q);
            continue;
        }
        sbd_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
                req->buffer, rq_data_dir(req));
        if ( ! __blk_end_request_cur(req, 0) ) {
            req = blk_fetch_request(q);
        }
    }
}
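
The comment above marks the 2.6.36 API break: blk_fs_request() was removed and callers test rq->cmd_type directly. A driver built on both sides of that boundary could carry a small compatibility shim, sketched here (the sbd_fs_request() name is an assumption, not kernel API):

#include <linux/version.h>

/* Sketch: one predicate that works before and after 2.6.36. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#define sbd_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#else
#define sbd_fs_request(rq)	blk_fs_request(rq)
#endif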
Example #15
static int nbdx_request(struct request *req, struct nbdx_queue *xq)
{
	struct nbdx_file *xdev;
	unsigned long start = blk_rq_pos(req) << NBDX_SECT_SHIFT;
	unsigned long len  = blk_rq_cur_bytes(req);
	int write = rq_data_dir(req) == WRITE;
	int err;
	void* buffer = bio_data(req->bio);

	pr_debug("%s called\n", __func__);

	xdev = req->rq_disk->private_data;

	err = nbdx_transfer(xdev, buffer, start, len, write, req, xq);
	if (unlikely(err))
		pr_err("transfer failed for req %p\n", req);

	return err;
}
Example #16
int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
	sector_t lba, sector = blk_rq_pos(rq);
	unsigned int nr_bytes = blk_rq_bytes(rq);
	int ret;

	WARN_ON(nr_bytes == 0);

	if (!sd_is_zoned(sdkp))
		/* Not a zoned device */
		return BLKPREP_KILL;

	ret = scsi_init_io(cmd);
	if (ret != BLKPREP_OK)
		return ret;

	cmd->cmd_len = 16;
	memset(cmd->cmnd, 0, cmd->cmd_len);
	cmd->cmnd[0] = ZBC_IN;
	cmd->cmnd[1] = ZI_REPORT_ZONES;
	lba = sectors_to_logical(sdkp->device, sector);
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
	/* Do partial report for speeding things up */
	cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;

	cmd->sc_data_direction = DMA_FROM_DEVICE;
	cmd->sdb.length = nr_bytes;
	cmd->transfersize = sdkp->device->sector_size;
	cmd->allowed = 0;

	/*
	 * Report may return less bytes than requested. Make sure
	 * to report completion on the entire initial request.
	 */
	rq->__data_len = nr_bytes;

	return BLKPREP_OK;
}
Example #17
static int htifblk_segment(struct htifblk_device *dev,
	struct request *req)
{
	static struct htifblk_request pkt __aligned(HTIF_ALIGN);
	u64 offset, size, end;
	unsigned long cmd;

	offset = (blk_rq_pos(req) << SECTOR_SIZE_SHIFT);
	size = (blk_rq_cur_sectors(req) << SECTOR_SIZE_SHIFT);

	end = offset + size;
	if (unlikely(end < offset || end > dev->size)) {
		dev_err(&dev->dev->dev, "out-of-bounds access:"
			" offset=%llu size=%llu\n", offset, size);
		return -EINVAL;
	}

	rmb();
	pkt.addr = __pa(req->buffer);
	pkt.offset = offset;
	pkt.size = size;
	pkt.tag = dev->tag;

	switch (rq_data_dir(req)) {
	case READ:
		cmd = HTIF_CMD_READ;
		break;
	case WRITE:
		cmd = HTIF_CMD_WRITE;
		break;
	default:
		return -EINVAL;
	}

	dev->req = req;
	dev->msg_buf.dev = dev->dev->index;
	dev->msg_buf.cmd = cmd;
	dev->msg_buf.data = __pa(&pkt);
	htif_tohost(&dev->msg_buf);
	return 0;
}
Example #18
void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
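
blk_dump_rq_flags() is exported by the block layer, so a driver can call it ad hoc while debugging a request it is about to fail; a short usage sketch (the surrounding function is hypothetical):

/* Sketch: dump a suspect request before rejecting it. */
static void mydrv_reject_request(struct request *req)
{
	blk_dump_rq_flags(req, "mydrv: rejecting");
	__blk_end_request_all(req, -EIO);
}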
Example #19
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
				    u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
				what, rq->errors, rq->cmd_len, rq->cmd);
	} else  {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
				what, rq->errors, 0, NULL);
	}
}
Example #20
static void ramdisk_request(struct request_queue *q)
{
    struct request *req;
    int ret;

    req = blk_fetch_request(q);
    while (req) {
        struct ramdisk_dev *dev = req->rq_disk->private_data;
        if (req->cmd_type != REQ_TYPE_FS) {
            printk (KERN_NOTICE "Skip non-fs request\n");
            ret = -EIO;
            goto done;
        }
        msg_dbg (KERN_NOTICE "Req dev %u dir %d sec %ld, nr %d\n",
            (unsigned int)dev, rq_data_dir(req),
                (long int)blk_rq_pos(req), blk_rq_cur_sectors(req));
        ramdisk_transfer(dev, req);
        ret = 0;
    done:
        /* Finish the current segment; fetch the next request when done */
        if (!__blk_end_request_cur(req, ret))
            req = blk_fetch_request(q);
    }
}
Example #21
static void xfer_timeout(unsigned long data)
{
	struct floppy_state *fs = (struct floppy_state *) data;
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_regs __iomem *dr = fs->dma;
	int n;

	fs->timeout_pending = 0;
	out_le32(&dr->control, RUN << 16);
	/* We must wait a bit for dbdma to stop */
	for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
		udelay(1);
	out_8(&sw->intr_enable, 0);
	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
	out_8(&sw->select, RELAX);
	printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
	       (rq_data_dir(fd_req)==WRITE? "writ": "read"),
	       (long)blk_rq_pos(fd_req));
	swim3_end_request_cur(-EIO);
	fs->state = idle;
	start_request(fs);
}
Example #22
/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
				    u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (rq->cmd_flags & REQ_DISCARD)
		rw |= REQ_DISCARD;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
				what, rq->errors, rq->cmd_len, rq->cmd);
	} else  {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
				what, rq->errors, 0, NULL);
	}
}
Example #23
static void sbd_request(struct request_queue *q) {
        struct request *req;
        unsigned long offset;
        unsigned long nbytes;

        req = blk_fetch_request(q);
        while (req != NULL) {
                if (req->cmd_type != REQ_TYPE_FS) {
                        printk (KERN_NOTICE "Skip non-CMD request\n");
                        __blk_end_request_all(req, -EIO);
                        req = blk_fetch_request(q);
                        continue;
                }
		offset = blk_rq_pos(req) * logical_block_size;
		nbytes = blk_rq_cur_sectors(req) * logical_block_size;

		operar_sector(&Device, offset, nbytes, req->buffer, rq_data_dir(req));

                if ( ! __blk_end_request_cur(req, 0) ) {
                        req = blk_fetch_request(q);
                }
        }
}
Example #24
static int tbio_transfer(struct request *req, struct tbio_device *dev)
{
	unsigned int i = 0, offset = 0;
	char *buf;
	unsigned long flags;
	size_t size;

	struct bio_vec *bv;
	struct req_iterator iter;

	size = blk_rq_cur_bytes(req);
	prk_info("bio req of size %zu:", size);
	offset = blk_rq_pos(req) * 512;

	rq_for_each_segment(bv, req, iter) {
		size = bv->bv_len;
		prk_info("%s bio(%u), segs(%u) sect(%u) pos(%lu) off(%u)",
			(bio_data_dir(iter.bio) == READ) ? "READ" : "WRITE",
			i, bio_segments(iter.bio), bio_sectors(iter.bio),
			iter.bio->bi_sector, offset);

		if (get_capacity(req->rq_disk) * 512 < offset) {
			prk_info("Error, small capacity %zu, offset %u",
				get_capacity(req->rq_disk) * 512,
				offset);
			continue;
		}

		buf = bvec_kmap_irq(bv, &flags);
		if (bio_data_dir(iter.bio) == WRITE)
			memcpy(dev->data + offset, buf, size);
		else
			memcpy(buf, dev->data + offset, size);
		offset += size;
		flush_kernel_dcache_page(bv->bv_page);
		bvec_kunmap_irq(buf, &flags);
		++i;
	}

	/* The example was truncated here; a successful return is assumed. */
	return 0;
}
Example #25
/*
 * Common write path running under the zvol taskq context.  This function
 * is responsible for copying the request structure data in to the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		blk_end_request(req, -error, size);
		return;
	}

	error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size, rq_is_sync(req));

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if (rq_is_sync(req))
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	blk_end_request(req, -error, size);
}
Example #26
static void read_intr(void)
{
	struct request *req;
	int i, retries = 100000;

	do {
		i = (unsigned) inb_p(HD_STATUS);
		if (i & BUSY_STAT)
			continue;
		if (!OK_STATUS(i))
			break;
		if (i & DRQ_STAT)
			goto ok_to_read;
	} while (--retries > 0);
	dump_status("read_intr", i);
	bad_rw_intr();
	hd_request();
	return;

ok_to_read:
	req = hd_req;
	insw(HD_DATA, req->buffer, 256);
#ifdef DEBUG
	printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
	       req->rq_disk->disk_name, blk_rq_pos(req) + 1,
	       blk_rq_sectors(req) - 1, req->buffer+512);
#endif
	if (hd_end_request(0, 512)) {
		SET_HANDLER(&read_intr);
		return;
	}

	(void) inb_p(HD_STATUS);
#if (HD_DELAY > 0)
	last_req = read_timer();
#endif
	hd_request();
}
Example #27
File: test.c Project: sktwj/var
/*
 * Fetch request objects from the request queue, read the operation
 * parameters out of each request (the starting sector and the byte count
 * of the read/write), then apply the operation to the hardware.
 * This function is invoked by the block driver framework itself; when it
 * runs is decided by the elevator (I/O scheduler).
 */
static void do_ldm_req(struct request_queue *q)
{
	/* Fetch one request object from the request queue */
	struct request *req = blk_fetch_request(q);
	while (req) {
		/* Byte offset of the starting sector of this operation */
		u32 start = blk_rq_pos(req) * SECTOR_SIZE;
		/* Number of bytes in the current request segment */
		u32 len = blk_rq_cur_bytes(req);

		/* Check whether this request would overrun the device */
		int err = 0;
		if (start + len > DEV_SIZE) {
			printk(KERN_ERR "request region is out of device capacity\n");
			err = -EIO;
			goto err_request;
		}

		/*
		 * rq_data_dir() gives the direction of the current request.
		 * Adding printk calls around the memcpy makes it easy to
		 * observe when reads and writes get scheduled.
		 */
		if (rq_data_dir(req) == READ) {
			/* Data flows from the kernel out to the application */
			memcpy(req->buffer, (u8 *)ldm.addr + start, len);
			printk("read from %d, size %d\n", start, len);
		} else {
			/* Data flows from the application into the kernel */
			memcpy((u8 *)ldm.addr + start, req->buffer, len);
			printk("write from %d, size %d\n", start, len);
		}

		/*
		 * __blk_end_request_cur() returns false once every part of
		 * the current request has completed; in that case try to
		 * fetch a new request from the queue, and if none is left
		 * req becomes NULL and the loop exits.  A true return means
		 * the current request is not finished yet, so keep looping.
		 * The err argument can override the outcome: with err < 0
		 * the call returns false, so on error we can finish the
		 * current request and move on to the next one.
		 */
err_request:
		if (!__blk_end_request_cur(req, err)) {
			req = blk_fetch_request(q);
		}
	}
}
Example #28
static void htifbd_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req != NULL) {
		struct htifbd_dev *dev;

		dev = req->rq_disk->private_data;
		if (req->cmd_type != REQ_TYPE_FS) {
			pr_notice(DRIVER_NAME ": ignoring non-fs request for %s\n",
				req->rq_disk->disk_name);
			__blk_end_request_all(req, -EIO);
			req = blk_fetch_request(q);
			continue;
		}
		}

		htifbd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
			req->buffer, rq_data_dir(req));
		if (!__blk_end_request_cur(req, 0)) {
			req = blk_fetch_request(q);
		}
	}
}
Example #29
/*
 * Common read path running under the zvol taskq context.  This function
 * is responsible for copying the requested data out of the DMU and in to
 * a linux request structure.  It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void
zvol_read(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error;
	rl_t *rl;

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

	zfs_range_unlock(rl);

	/* convert checksum errors into IO errors */
	if (error == ECKSUM)
		error = EIO;

	blk_end_request(req, -error, size);
}
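
Both zvol paths negate the error before completing the request: ZFS carries positive errno values internally (EIO, ECKSUM), while blk_end_request() expects the kernel's negative-errno convention. A sketch of that boundary (the helper is illustrative, not project code):

/* Sketch: translate a positive ZFS errno for the block layer. */
static void zvol_end_request(struct request *req, int zfs_error, unsigned int size)
{
	/* e.g. zfs_error == EIO (positive) becomes -EIO here */
	blk_end_request(req, -zfs_error, size);
}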
Example #30
static void
vr_add_rq_rb(struct vr_data *vd, struct request *rq)
{
	struct request *alias = elv_rb_add(&vd->sort_list, rq);

	if (unlikely(alias)) {
		vr_move_request(vd, alias);
		alias = elv_rb_add(&vd->sort_list, rq);
		BUG_ON(alias);
	}

	if (blk_rq_pos(rq) >= vd->last_sector) {
		if (!vd->next_rq || blk_rq_pos(vd->next_rq) > blk_rq_pos(rq))
			vd->next_rq = rq;
	} else {
		if (!vd->prev_rq || blk_rq_pos(vd->prev_rq) < blk_rq_pos(rq))
			vd->prev_rq = rq;
	}

	BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq);
	BUG_ON(vd->next_rq && vd->prev_rq && blk_rq_pos(vd->next_rq) < blk_rq_pos(vd->prev_rq));
}