Example #1
/*
 * The function uses blk_fetch_request() to walk struct request_queue *q,
 * handling each pending request, represented by struct request *req.
 * It first checks whether the request goes beyond the capacity of our block
 * device, then handles it according to its direction as reported by
 * rq_data_dir(req). Since the device is backed by a simple in-memory array,
 * handling a request boils down to two memcpy() calls (one per direction).
 */
static void simp_blkdev_do_request(struct request_queue* q) {
    struct request* req;

    while ((req = blk_fetch_request(q)) != NULL) {
        if ((blk_rq_pos(req) + blk_rq_cur_sectors(req)) << 9 > SIMP_BLKDEV_BYTES) {
            printk(KERN_ERR SIMP_BLKDEV_DISKNAME": bad request: block=%llu, count=%u\n",
                   (unsigned long long)blk_rq_pos(req), blk_rq_cur_sectors(req)); /* blk_rq_pos(): first sector of the request; blk_rq_cur_sectors(): sectors in the current segment */

            /* Unlike the legacy end_request(), which took 1 for success and 0
             * (or an error number) for failure, blk_end_request_all() takes 0
             * for success or a negative errno. */
            blk_end_request_all(req, -EIO);
            continue;
        }

        switch (rq_data_dir(req)) {
        case READ:
            /* Copy data from the block device's backing array into req->buffer. */
            memcpy(req->buffer, simp_blkdev_data + (blk_rq_pos(req) << 9),
                   blk_rq_cur_sectors(req) << 9);
            blk_end_request_all(req, 0);
            break;

        case WRITE:
            /* Copy data from req->buffer into the block device's backing array. */
            memcpy(simp_blkdev_data + (blk_rq_pos(req) << 9), req->buffer,
                   blk_rq_cur_sectors(req) << 9);
            blk_end_request_all(req, 0);
            break;

        default:
            /* Cannot happen: rq_data_dir() is a single bit. */
            break;
        }
    }
}
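
The request function above only runs once it has been attached to a request queue. As a hedged sketch (not part of the original driver), the following shows how such a function is typically wired up with the legacy blk_init_queue() API; the names simp_blkdev_queue, simp_blkdev_disk, simp_blkdev_lock, simp_blkdev_fops, simp_blkdev_init and SIMP_BLKDEV_DEVICEMAJOR are illustrative assumptions.

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>

static struct request_queue *simp_blkdev_queue;   /* assumed */
static struct gendisk *simp_blkdev_disk;          /* assumed */
static DEFINE_SPINLOCK(simp_blkdev_lock);         /* protects the request queue */

static int __init simp_blkdev_init(void)
{
    /* Attach simp_blkdev_do_request() to a newly created request queue. */
    simp_blkdev_queue = blk_init_queue(simp_blkdev_do_request, &simp_blkdev_lock);
    if (!simp_blkdev_queue)
        return -ENOMEM;

    simp_blkdev_disk = alloc_disk(1);
    if (!simp_blkdev_disk) {
        blk_cleanup_queue(simp_blkdev_queue);
        return -ENOMEM;
    }

    strcpy(simp_blkdev_disk->disk_name, SIMP_BLKDEV_DISKNAME);
    simp_blkdev_disk->major = SIMP_BLKDEV_DEVICEMAJOR;    /* assumed macro */
    simp_blkdev_disk->first_minor = 0;
    simp_blkdev_disk->fops = &simp_blkdev_fops;           /* assumed block_device_operations */
    simp_blkdev_disk->queue = simp_blkdev_queue;
    set_capacity(simp_blkdev_disk, SIMP_BLKDEV_BYTES >> 9);
    add_disk(simp_blkdev_disk);

    return 0;
}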
Example #2
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;
	int rw;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		rw = rq_data_dir(rq);
		if (!rq->q->mq_ops)
			blk_end_request_all(rq, tio->error);
		else
			blk_mq_end_request(rq, tio->error);
		rq_completed(md, rw, false);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}
void bsg_request_fn(struct request_queue *q)
{
	struct device *dev = q->queuedata;
	struct request *req;
	struct bsg_job *job;
	int ret;

	if (!get_device(dev))
		return;

	while (1) {
		req = blk_fetch_request(q);
		if (!req)
			break;
		spin_unlock_irq(q->queue_lock);

		ret = bsg_create_job(dev, req);
		if (ret) {
			req->errors = ret;
			blk_end_request_all(req, ret);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		job = req->special;
		ret = q->bsg_job_fn(job);
		spin_lock_irq(q->queue_lock);
		if (ret)
			break;
	}

	spin_unlock_irq(q->queue_lock);
	put_device(dev);
	spin_lock_irq(q->queue_lock);
}
void bsg_remove_queue(struct request_queue *q)
{
	struct request *req; 
	int counts; 

	if (!q)
		return;

	
	spin_lock_irq(q->queue_lock);
	blk_stop_queue(q);

	
	while (1) {
		req = blk_fetch_request(q);
		
		counts = q->rq.count[0] + q->rq.count[1] +
			 q->rq.starved[0] + q->rq.starved[1];
		spin_unlock_irq(q->queue_lock);
		
		if (counts == 0)
			break;

		if (req) {
			req->errors = -ENXIO;
			blk_end_request_all(req, -ENXIO);
		}

		msleep(200); 
		spin_lock_irq(q->queue_lock);
	}
	bsg_unregister_queue(q);
}
Example #5
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;
	int rw;

	if (!clone) {
		rq_end_stats(tio->md, rq);
		rw = rq_data_dir(rq);
		if (!rq->q->mq_ops) {
			blk_end_request_all(rq, tio->error);
			rq_completed(tio->md, rw, false);
			free_old_rq_tio(tio);
		} else {
			blk_mq_end_request(rq, tio->error);
			rq_completed(tio->md, rw, false);
		}
		return;
	}

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}
Example #6
/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}
static void bsg_softirq_done(struct request *rq)
{
	struct bsg_job *job = rq->special;

	blk_end_request_all(rq, rq->errors);
	bsg_destroy_job(job);
}
Example #8
static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode)  {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
Example #9
File: sbull.c Project: l3b2w1/ldd
/*
 * The simple form of the request function.
 */
static void sbull_request(request_queue_t *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;

		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			blk_end_request_all(req, -EIO);
			continue;
		}

		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
				req->buffer, rq_data_dir(req));
		blk_end_request_all(req, 0);
	}
}
Example #10
/*
 * simp_blkdev_do_request: the request function, copying data segment by segment.
 */
static void simp_blkdev_do_request(struct request_queue *q)
{
	struct request *req;
	struct req_iterator ri;
	struct bio_vec *bvec;
	char *disk_mem;
	char *buffer;
		
	while ((req = blk_fetch_request(q)) != NULL) {
		if ((blk_rq_pos(req) << 9) + blk_rq_cur_bytes(req)
			> SIMP_BLKDEV_BYTES) {
			printk(KERN_ERR SIMP_BLKDEV_DISKNAME
				": bad request: block=%llu, count=%u\n",
				(unsigned long long)blk_rq_pos(req),
				blk_rq_cur_bytes(req));
			blk_end_request_all(req, -EIO);
			continue;
		}
		
		disk_mem = simp_blkdev_data + (blk_rq_pos(req) << 9);
		switch (rq_data_dir(req)) {
		case READ:	
			rq_for_each_segment(bvec, req, ri)
			{
				buffer = kmap(bvec->bv_page) + bvec->bv_offset;
				memcpy(buffer, disk_mem, bvec->bv_len);
				kunmap(bvec->bv_page);
				disk_mem += bvec->bv_len;
			}
			
			/*memcpy(req->buffer,
			simp_blkdev_data + (blk_rq_pos(req) << 9),
			blk_rq_cur_bytes(req));*/
			__blk_end_request_all(req, 0);
			break;
		case WRITE:		
			rq_for_each_segment(bvec, req, ri)
			{
				buffer = kmap(bvec->bv_page) + bvec->bv_offset;
				memcpy(disk_mem, buffer, bvec->bv_len);
				kunmap(bvec->bv_page);
				disk_mem += bvec->bv_len;
			}
			/*memcpy(simp_blkdev_data + (blk_rq_pos(req) << 9),
			req->buffer, blk_rq_cur_bytes(req));*/
			__blk_end_request_all(req, 0);
			break;
		default:
			/* No default because rq_data_dir(req) is 1 bit */
			break;
		}
	}
}
Example #11
void block_request(struct request_queue *q)
{
  struct request *req;
  unsigned long offset, nbytes;

  req = blk_fetch_request(q);
  while (req != NULL) {
    // Stop looping once we've exhausted the queue.
    // The kernel will call this function whenever
    // there is at least one element in the queue.

    // Check if we support handling this request.
    if (req->cmd_type != REQ_TYPE_FS) {
      // Declare our intention to handle no buffers
      // from this request.  We'll use an IO error
      // to signal that we don't accept requests that
      // aren't related to reading/writing to the
      // filesystem.
      blk_end_request_all(req, -EIO);
      // Fetch the next request so we don't loop forever on this one.
      req = blk_fetch_request(q);
      continue;
    }
    
    // Handle the request.

    // Byte offset of this request on the device.
    offset = blk_rq_pos(req) * LOGICAL_BLOCK_SIZE;
    // Number of bytes in the current segment.
    nbytes = blk_rq_cur_sectors(req) * LOGICAL_BLOCK_SIZE;

    if (rq_data_dir(req)) {
      // Check that the write won't exceed the size of the block device.
      if ((offset + nbytes) <= size) {
        // Do write.
        memcpy(data + offset, req->buffer, nbytes);
      }
    } else {
      // Check that the read won't exceed the size of the block device.
      if ((offset + nbytes) <= size) {
        // Do read.
        memcpy(req->buffer, data + offset, nbytes);
      }
    }

    // Declare our intention to end the request.
    // if buffers still need to be handled, blk_end_request_cur
    // will return true, and we'll continue handling this req.
    if (!blk_end_request_cur(req, 0)) {
      // If not, pop a new request off the queue
      req = blk_fetch_request(q);
    }
  }
}
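
block_request() above relies on a few module-level definitions that are not shown: LOGICAL_BLOCK_SIZE, size and data. A minimal sketch of what they might look like (assumed for illustration; the real module may define them differently):

// Hypothetical module-level state assumed by block_request().
#define LOGICAL_BLOCK_SIZE 512      // bytes per logical sector
static unsigned long size;          // device capacity in bytes
static u8 *data;                    // backing store, e.g. vmalloc()ed in module init

// In module init (sketch), with nsectors an assumed module parameter:
// size = nsectors * LOGICAL_BLOCK_SIZE;
// data = vmalloc(size);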
Example #12
static void end_cmd(struct nullb_cmd *cmd)
{
	if (cmd->rq) {
		if (queue_mode == NULL_Q_MQ)
			blk_mq_end_io(cmd->rq, 0);
		else {
			INIT_LIST_HEAD(&cmd->rq->queuelist);
			blk_end_request_all(cmd->rq, 0);
		}
	} else if (cmd->bio)
		bio_endio(cmd->bio, 0);

	if (queue_mode != NULL_Q_MQ)
		free_cmd(cmd);
}
Example #13
/*
 * Post finished request.
 */
static void
__tapeblock_end_request(struct tape_request *ccw_req, void *data)
{
	struct tape_device *device;
	struct request *req;

	DBF_LH(6, "__tapeblock_end_request()\n");

	device = ccw_req->device;
	req = (struct request *) data;
	blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
	if (ccw_req->rc == 0)
		/* Update position. */
		device->blk_data.block_position =
		  (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
	else
		/* We lost the position information due to an error. */
		device->blk_data.block_position = -1;
}
Example #14
/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone);

	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}
Example #15
static void end_cmd(struct nullb_cmd *cmd)
{
	switch (queue_mode)  {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio, 0);
		break;
	}

	free_cmd(cmd);
}
Example #16
/**
 * bsg_remove_queue - Deletes the bsg dev from the q
 * @q:	the request_queue that is to be torn down.
 *
 * Notes:
 *   Before unregistering the queue empty any requests that are blocked
 */
void bsg_remove_queue(struct request_queue *q)
{
	struct request *req; /* block request */
	int counts; /* totals for request_list count and starved */

	if (!q)
		return;

	/* Stop taking in new requests */
	spin_lock_irq(q->queue_lock);
	blk_stop_queue(q);

	/* drain all requests in the queue */
	while (1) {
		/* need the lock to fetch a request
	 * this may fetch the same request as the previous pass
		 */
		req = blk_fetch_request(q);
		/* save requests in use and starved */
		counts = q->root_rl.count[0] + q->root_rl.count[1] +
			 q->root_rl.starved[0] + q->root_rl.starved[1];
		spin_unlock_irq(q->queue_lock);
		/* any requests still outstanding? */
		if (counts == 0)
			break;

		/* This may be the same req as the previous iteration,
		 * always send the blk_end_request_all after a prefetch.
		 * It is not okay to not end the request because the
		 * prefetch started the request.
		 */
		if (req) {
			/* return -ENXIO to indicate that this queue is
			 * going away
			 */
			req->errors = -ENXIO;
			blk_end_request_all(req, -ENXIO);
		}

		msleep(200); /* allow bsg to possibly finish */
		spin_lock_irq(q->queue_lock);
	}
	bsg_unregister_queue(q);
}
Example #17
/*
 * Message receiver(workqueue)
 */
static void mbox_rx_work(struct work_struct *work)
{
	struct omap_mbox_queue *mq =
			container_of(work, struct omap_mbox_queue, work);
	struct omap_mbox *mbox = mq->queue->queuedata;
	struct request_queue *q = mbox->rxq->queue;
	struct request *rq;
	mbox_msg_t msg;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(q->queue_lock, flags);
		rq = blk_fetch_request(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
		if (!rq)
			break;

		msg = (mbox_msg_t)rq->special;
		blk_end_request_all(rq, 0);
		mbox->rxq->callback((void *)msg);
	}
}
Example #18
static void mbox_tx_tasklet(unsigned long tx_data)
{
	int ret;
	struct request *rq;
	struct omap_mbox *mbox = (struct omap_mbox *)tx_data;
	struct request_queue *q = mbox->txq->queue;

	while (1) {

		rq = blk_fetch_request(q);

		if (!rq)
			break;

		ret = __mbox_msg_send(mbox, (mbox_msg_t)rq->special);
		if (ret) {
			omap_mbox_enable_irq(mbox, IRQ_TX);
			blk_requeue_request(q, rq);
			return;
		}
		blk_end_request_all(rq, 0);
	}
}
Example #19
static void null_softirq_done_fn(struct request *rq)
{
	blk_end_request_all(rq, 0);
}
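
null_softirq_done_fn() is only useful once the queue has been told to complete requests in softirq context. A hedged sketch of that wiring with the legacy single-queue API (nullb->q and cmd->rq are names used elsewhere in null_blk; treat the exact call sites as assumptions):

/* At queue setup time: register the softirq completion handler. */
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);

/* Later, when a command finishes (e.g. in the IRQ or timer path),
 * defer the final completion to null_softirq_done_fn(): */
blk_complete_request(cmd->rq);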
Example #20
/*
 * Thread for srb
 */
static int srb_thread(void *data)
{
	struct srb_device_s *dev;
	struct request *req;
	unsigned long flags;
	int th_id;
	int th_ret = 0;
	char buff[256];
	struct req_iterator iter;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
	struct bio_vec *bvec;
#else
	struct bio_vec bvec;
#endif
	struct srb_cdmi_desc_s *cdmi_desc;

	SRBDEV_LOG_DEBUG(((struct srb_device_s *)data), "Thread started with device %p", data);

	dev = data;

	/* Init thread specific values */
	spin_lock(&devtab_lock);
	th_id = dev->nb_threads;
	dev->nb_threads++;
	spin_unlock(&devtab_lock);

	set_user_nice(current, -20);
	while (!kthread_should_stop() || !list_empty(&dev->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(dev->waiting_wq,
					kthread_should_stop() ||
					!list_empty(&dev->waiting_queue));

		/* TODO: improve kthread termination, otherwise we cannot
		 * terminate the kthread by calling kthread_stop() */
		/* if (kthread_should_stop()) {
			printk(KERN_INFO "srb_thread: immediate kthread exit\n");
			do_exit(0);
		} */

		spin_lock_irqsave(&dev->waiting_lock, flags);
		/* extract request */
		if (list_empty(&dev->waiting_queue)) {
			spin_unlock_irqrestore(&dev->waiting_lock, flags);
			continue;
		}
		req = list_entry(dev->waiting_queue.next, struct request,
				queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irqrestore(&dev->waiting_lock, flags);
		
		if (blk_rq_sectors(req) == 0) {
			blk_end_request_all(req, 0);
			continue;
		}

		req_flags_to_str(req->cmd_flags, buff);
		SRBDEV_LOG_DEBUG(dev, "thread %d: New REQ of type %s (%d) flags: %s (%llu)",
				 th_id, req_code_to_str(rq_data_dir(req)), rq_data_dir(req), buff,
                                 (unsigned long long)req->cmd_flags);
		if (req->cmd_flags & REQ_FLUSH) {
			SRBDEV_LOG_DEBUG(dev, "DEBUG CMD REQ_FLUSH\n");
		}
		/* XXX: Use iterator instead of internal function (cf linux/blkdev.h)
		 *  __rq_for_each_bio(bio, req) {
		 */
		rq_for_each_segment(bvec, req, iter) {
			if (iter.bio->bi_rw & REQ_FLUSH) {
				SRBDEV_LOG_DEBUG(dev, "DEBUG VR BIO REQ_FLUSH\n");
			}
		}

		/* Create scatterlist */
		cdmi_desc = dev->thread_cdmi_desc[th_id];
		sg_init_table(dev->thread_cdmi_desc[th_id]->sgl, DEV_NB_PHYS_SEGS);
		dev->thread_cdmi_desc[th_id]->sgl_size = blk_rq_map_sg(dev->q, req, dev->thread_cdmi_desc[th_id]->sgl);

		SRBDEV_LOG_DEBUG(dev, "scatter_list size %d [nb_seg = %d,"
		                 " sector = %lu, nr_sectors=%u w=%d]",
		                 DEV_NB_PHYS_SEGS,
		                 dev->thread_cdmi_desc[th_id]->sgl_size,
		                 blk_rq_pos(req), blk_rq_sectors(req),
		                 rq_data_dir(req) == WRITE);

		/* Call scatter function */
		th_ret = srb_xfer_scl(dev, dev->thread_cdmi_desc[th_id], req);

		SRBDEV_LOG_DEBUG(dev, "thread %d: REQ done with returned code %d",
		                 th_id, th_ret);
	
		/* No IO error testing for the moment */
		blk_end_request_all(req, 0);
	}

	return 0;
}
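
srb_thread() above consumes requests from dev->waiting_queue and sleeps on dev->waiting_wq, so something must feed that list. A hypothetical sketch of a companion request function (the srb_rq_fn name and the q->queuedata usage are assumptions, not the driver's actual code):

static void srb_rq_fn(struct request_queue *q)
{
	struct srb_device_s *dev = q->queuedata;   /* assumed to point at the device */
	struct request *req;

	/* Called with q->queue_lock held: move each fetched request onto the
	 * driver's private waiting_queue and wake the worker thread(s). */
	while ((req = blk_fetch_request(q)) != NULL) {
		unsigned long flags;

		spin_lock_irqsave(&dev->waiting_lock, flags);
		list_add_tail(&req->queuelist, &dev->waiting_queue);
		spin_unlock_irqrestore(&dev->waiting_lock, flags);

		wake_up(&dev->waiting_wq);
	}
}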