static void ramblock_do_request(struct request_queue * q)
{
	struct request *req;
	printk("do:ramblock_do_request\n");
	req = blk_fetch_request(q);
	while (req) {
		/* source or destination: byte offset into the RAM buffer */
		unsigned long offset = blk_rq_pos(req) * 512;

		/* destination or source: req->buffer */

		/* length */
		unsigned long len  = blk_rq_cur_bytes(req);
		if (rq_data_dir(req) == READ)
			memcpy(req->buffer, ram_buff+offset, len);
		else
			memcpy(ram_buff+offset, req->buffer, len);

		/* wrap up, 0 = success, -errno = fail */
		if (!__blk_end_request_cur(req, 0))
			req = blk_fetch_request(q);
	}

}
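For context, a request function like ramblock_do_request() above is installed when the queue is created with blk_init_queue(); the kernel then calls it with that queue's spinlock held, which is why the examples on this page complete requests with the __blk_end_request_* variants. Below is a minimal registration sketch; only ramblock_do_request() comes from the example above, while the queue, lock, disk, fops and RAMBLOCK_SIZE names are hypothetical.
#include <linux/module.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>

static DEFINE_SPINLOCK(ramblock_lock);        /* hypothetical: lock passed to blk_init_queue() */
static struct request_queue *ramblock_queue;  /* hypothetical */
static struct gendisk *ramblock_disk;         /* hypothetical */
static int ramblock_major;

static int __init ramblock_init(void)
{
	/* Tie the request function to a queue; the kernel invokes
	 * ramblock_do_request() with ramblock_lock held, so the handler
	 * must use the __blk_end_request_* completion helpers. */
	ramblock_queue = blk_init_queue(ramblock_do_request, &ramblock_lock);
	if (!ramblock_queue)
		return -ENOMEM;

	ramblock_major = register_blkdev(0, "ramblock");

	ramblock_disk = alloc_disk(16);           /* 16 minors: whole disk + partitions */
	if (!ramblock_disk) {
		blk_cleanup_queue(ramblock_queue);
		return -ENOMEM;
	}
	ramblock_disk->major = ramblock_major;
	ramblock_disk->first_minor = 0;
	ramblock_disk->fops = &ramblock_fops;     /* hypothetical block_device_operations */
	ramblock_disk->queue = ramblock_queue;
	sprintf(ramblock_disk->disk_name, "ramblock");
	set_capacity(ramblock_disk, RAMBLOCK_SIZE / 512);  /* capacity in 512-byte sectors */
	add_disk(ramblock_disk);

	return 0;
}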
Example #2
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;
	int ret;

	req = blk_fetch_request(q);
	while (req) {
		struct sbull_dev *dev = req->rq_disk->private_data;
		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			ret = -EIO;
			goto done;
		}
		printk (KERN_NOTICE "Req dev %u dir %d sec %ld, nr %d\n",
			(unsigned)(dev - Devices), rq_data_dir(req),
			blk_rq_pos(req), blk_rq_cur_sectors(req));
		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
				req->buffer, rq_data_dir(req));
		ret = 0;
	done:
		if(!__blk_end_request_cur(req, ret)){
			req = blk_fetch_request(q);
		}
	}
}
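sbull_request() above delegates the actual copy to sbull_transfer(), which is not shown on this page. In the LDD3 sbull driver that helper is essentially a bounds check plus a memcpy(); a sketch along those lines follows, where KERNEL_SECTOR_SIZE (512) and the fields dev->size and dev->data are assumptions about the surrounding driver.
/* Copy nsect sectors starting at 'sector' between the device's RAM
 * backing store and 'buffer'; 'write' selects the direction. */
static void sbull_transfer(struct sbull_dev *dev, unsigned long sector,
		unsigned long nsect, char *buffer, int write)
{
	unsigned long offset = sector * KERNEL_SECTOR_SIZE;   /* 512-byte sectors */
	unsigned long nbytes = nsect * KERNEL_SECTOR_SIZE;

	if ((offset + nbytes) > dev->size) {                   /* assumed field: device size in bytes */
		printk(KERN_NOTICE "Beyond-end write (%ld %ld)\n", offset, nbytes);
		return;
	}
	if (write)
		memcpy(dev->data + offset, buffer, nbytes);    /* assumed field: RAM backing store */
	else
		memcpy(buffer, dev->data + offset, nbytes);
}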
Example #3
static void simp_blkdev_do_request(struct request_queue *q)
{
   struct request *req ;
   req = blk_fetch_request(q);
   while(req)
   {
       unsigned long start; 
       unsigned long len; 
       int err=0;
       start =blk_rq_pos(req)<<9; 
       len =blk_rq_cur_sectors(req)<<9;
       if(start + len >SIMP_BLKDEV_BYTES)
       {
           printk(KERN_ERR SIMP_BLKDEV_DISKNAME ":bad access:block=%lu,count=%u\n",blk_rq_pos(req), blk_rq_cur_sectors(req));
           err = -EIO;
           goto done;
       }
       if(rq_data_dir(req)==READ)
           memcpy(req->buffer,simp_blkdev_data+start,len);
       else
           memcpy(simp_blkdev_data+start,req->buffer,len);
done:
       if (!__blk_end_request_cur(req, err))
           req = blk_fetch_request(q);
   }
}
void ramhd_req_func(struct request_queue* q) {
    struct request* req;
    RAMHD_DEV* pdev;
    char* pData;
    unsigned long addr, size, start;
    req = blk_fetch_request(q);

    while (req) {
        start = blk_rq_pos(req); // The sector cursor of the current request
        pdev = (RAMHD_DEV*)req->rq_disk->private_data;
        pData = pdev->data;
        addr = (unsigned long)pData + start * RAMHD_SECTOR_SIZE;
        size = blk_rq_cur_bytes(req);

        if (rq_data_dir(req) == READ) {
            memcpy(req->buffer, (char*)addr, size);
        } else {
            memcpy((char*)addr, req->buffer, size);
        }

        if (!__blk_end_request_cur(req, 0)) {
            req = blk_fetch_request(q);
        }
    }
}
Example #5
static void do_z2_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req) {
		unsigned long start = blk_rq_pos(req) << 9;
		unsigned long len  = blk_rq_cur_bytes(req);
		int err = 0;

		if (start + len > z2ram_size) {
			printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
				blk_rq_pos(req), blk_rq_cur_sectors(req));
			err = -EIO;
			goto done;
		}
		while (len) {
			unsigned long addr = start & Z2RAM_CHUNKMASK;
			unsigned long size = Z2RAM_CHUNKSIZE - addr;
			if (len < size)
				size = len;
			addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
			if (rq_data_dir(req) == READ)
				memcpy(req->buffer, (char *)addr, size);
			else
				memcpy((char *)addr, req->buffer, size);
			start += size;
			len -= size;
		}
	done:
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
Example #6
static void do_ramblk_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req) {
		unsigned long start = blk_rq_pos(req) << 9;
		unsigned long len  = blk_rq_cur_bytes(req);
		int err = 0;

		if (start + len > RAMBLK_SIZE) {
			printk(KERN_ERR "do_ramblk_request: bad access: start + len > RAMBLK_SIZE\n");
			err = -EIO;
			goto done;
		}

		if (rq_data_dir(req) == READ)
			memcpy(req->buffer, (char *)(start + ramblk_buf), len);
		else
			memcpy((char *)(start + ramblk_buf), req->buffer, len);

done:
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
Example #7
static void mini2440_ramdisk_request(struct request_queue *q)
{    
	static int r_cnt = 0;
	static int w_cnt = 0;
	struct request *req;
	
	req = blk_fetch_request(q);
	while (req) {
		/* Three elements of a data transfer: source, destination, length */
		/* source/destination: */
		unsigned long offset = blk_rq_pos(req) << 9;

		/* length: */
		unsigned long len = blk_rq_cur_bytes(req);

		if (rq_data_dir(req) == READ) {
			printk("[RAMDISK]ramdisk_request read %d\n", ++r_cnt);
			memcpy(req->buffer, mini2440_ramdisk_devp->ramdisk_buffer+offset, len);
		}else {
			printk("[RAMDISK]ramdisk_request write %d\n", ++w_cnt);
			memcpy(mini2440_ramdisk_devp->ramdisk_buffer+offset, req->buffer, len);
		}		
		
		if (!__blk_end_request_cur(req, 0))
			req = blk_fetch_request(q);
		/* else: __blk_end_request_cur() returned true, so the same request
		 * still has segments pending and is handled again on the next pass */
	}
}
Example #8
static void cf_request(struct request_queue *q)
{
	struct cf_device *cf;
	struct request *req;
	unsigned block, count;
	int rw, err;

	DPRINTK(DEBUG_CF_REQUEST, "%s: q %p", __FUNCTION__, q);

	req = blk_fetch_request(q);
	while (req) {
		err = -EIO;
		DPRINTK(DEBUG_CF_REQUEST, "%s:%d req %p", __FUNCTION__, __LINE__, req);

		if (!blk_fs_request(req))
			goto done;

		

		block = blk_rq_pos(req);
		count = blk_rq_cur_sectors(req);
		rw = rq_data_dir(req);
		cf = req->rq_disk->private_data;

		DPRINTK(DEBUG_CF_REQUEST, "req %p block %d count %d rw %c\n", req, block, count, (rw == READ)?'R':'W');

		if (block+count > get_capacity(req->rq_disk)) {
			printk("%s: %u+%u is larger than %llu\n", __FUNCTION__, block, count, get_capacity(req->rq_disk));
			goto done;
		}

		/* Grab the R/W semaphore to prevent more than
		 * one request from trying to R/W at the same time */
		err = down_interruptible(&cf->rw_sem);
		if (err)
			break;

		if (rw == READ)
			err = cf_read_sectors(cf, req->buffer, block, count);
		else
			err = cf_write_sectors(cf, req->buffer, block, count);
		up(&cf->rw_sem);

	done:
		DPRINTK(DEBUG_CF_REQUEST, "%s: blk_end_request_cur(%p, %d)\n", __FUNCTION__, req, err);
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
	DPRINTK(DEBUG_CF_REQUEST, "end\n");
	cf_in_request--;
}
Example #9
void block_request(struct request_queue *q)
{
  struct request *req;
  unsigned long offset, nbytes;

  req = blk_fetch_request(q);
  while (req != NULL) {
    // Stop looping once we've exhausted the queue.
    // The kernel will call this function whenever
    // there is at least one element in the queue.

    // Check if we support handling this request.  We use an IO error
    // to reject anything that isn't a filesystem read/write request.
    if (req->cmd_type != REQ_TYPE_FS) {
      // The request function runs with the queue lock held, so the
      // __blk_* completion variant must be used.  Fetch the next
      // request before looping, otherwise we would spin on the
      // already-ended request forever.
      __blk_end_request_all(req, -EIO);
      req = blk_fetch_request(q);
      continue;
    }
    
    // Handle the request.

    // Byte offset of the request on the device.
    offset = blk_rq_pos(req) * LOGICAL_BLOCK_SIZE;
    // Size of the current segment in bytes.
    nbytes = blk_rq_cur_sectors(req) * LOGICAL_BLOCK_SIZE;

    if (rq_data_dir(req)) {
      // Check that the write won't exceed the size of the block device.
      if ((offset + nbytes) <= size) {
	// Do write.
	memcpy(data + offset, req->buffer, nbytes);
      }
    } else {
      // Do read.
      memcpy(req->buffer, data + offset, nbytes);
    }

    // End the current chunk of the request.  If more chunks still need
    // to be handled, __blk_end_request_cur() returns true and we keep
    // working on the same req.  The __blk_* variant is used because the
    // request function is called with the queue lock held.
    if (!__blk_end_request_cur(req, 0)) {
      // If not, pop a new request off the queue
      req = blk_fetch_request(q);
    }
  }
}
Example #10
static void htifblk_request(struct request_queue *q)
{
	struct htifblk_device *dev;
	struct request *req;
	int ret;

	dev = q->queuedata;
	if (dev->req != NULL)
		return;

	while ((req = blk_fetch_request(q)) != NULL) {
		if (req->cmd_type == REQ_TYPE_FS) {
			ret = htifblk_segment(dev, req);
			if (unlikely(ret)) {
				WARN_ON(__blk_end_request_cur(req, ret));
				continue;
			}
			blk_stop_queue(q);
			break;
		} else {
			blk_dump_rq_flags(req, DRIVER_NAME
				": ignored non-fs request");
			__blk_end_request_all(req, -EIO);
			continue;
		}
	}
}
void bsg_remove_queue(struct request_queue *q)
{
	struct request *req; 
	int counts; 

	if (!q)
		return;

	
	spin_lock_irq(q->queue_lock);
	blk_stop_queue(q);

	
	while (1) {
		req = blk_fetch_request(q);
		
		counts = q->rq.count[0] + q->rq.count[1] +
			 q->rq.starved[0] + q->rq.starved[1];
		spin_unlock_irq(q->queue_lock);
		
		if (counts == 0)
			break;

		if (req) {
			req->errors = -ENXIO;
			blk_end_request_all(req, -ENXIO);
		}

		msleep(200); 
		spin_lock_irq(q->queue_lock);
	}
	bsg_unregister_queue(q);
}
Example #12
static void simp_blkdev_do_request(struct request_queue *q)
{
        struct request *req;

        req = blk_fetch_request(q);
        while (req != NULL) {
                int err = 0;

                if ((blk_rq_pos(req) + blk_rq_cur_sectors(req)) << 9
                        > SIMP_BLKDEV_BYTES) {
                        printk(KERN_ERR SIMP_BLKDEV_DISKNAME
                                ": bad request: block=%llu, count=%u\n",
                                (unsigned long long)blk_rq_pos(req),
                                blk_rq_cur_sectors(req));
                        err = -EIO;
                } else if (rq_data_dir(req) == READ) {
                        memcpy(req->buffer,
                                simp_blkdev_data + (blk_rq_pos(req) << 9),
                                blk_rq_cur_bytes(req));
                } else {
                        /* WRITE: rq_data_dir() is a single bit, so there is
                         * no other case to handle */
                        memcpy(simp_blkdev_data + (blk_rq_pos(req) << 9),
                                req->buffer, blk_rq_cur_bytes(req));
                }

                /* Complete the current segment; fetch a new request only
                 * once the whole request has been finished. */
                if (!__blk_end_request_cur(req, err))
                        req = blk_fetch_request(q);
        }
}
Example #13
static void start_request(struct floppy_state *fs)
{
	struct request *req;
	unsigned long x;

	if (fs->state == idle && fs->wanted) {
		fs->state = available;
		wake_up(&fs->wait);
		return;
	}
	while (fs->state == idle) {
		if (!fd_req) {
			fd_req = blk_fetch_request(swim3_queue);
			if (!fd_req)
				break;
		}
		req = fd_req;
#if 0
		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
		       req->rq_disk->disk_name, req->cmd,
		       (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
		printk("           errors=%d current_nr_sectors=%u\n",
		       req->errors, blk_rq_cur_sectors(req));
#endif

		if (blk_rq_pos(req) >= fs->total_secs) {
			swim3_end_request_cur(-EIO);
			continue;
		}
		if (fs->ejected) {
			swim3_end_request_cur(-EIO);
			continue;
		}

		if (rq_data_dir(req) == WRITE) {
			if (fs->write_prot < 0)
				fs->write_prot = swim3_readbit(fs, WRITE_PROT);
			if (fs->write_prot) {
				swim3_end_request_cur(-EIO);
				continue;
			}
		}

		/* Do not remove the cast. blk_rq_pos(req) is now a
		 * sector_t and can be 64 bits, but it will never go
		 * past 32 bits for this driver anyway, so we can
		 * safely cast it down and not have to do a 64/32
		 * division
		 */
		fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
		x = ((long)blk_rq_pos(req)) % fs->secpercyl;
		fs->head = x / fs->secpertrack;
		fs->req_sector = x % fs->secpertrack + 1;
		fd_req = req;
		fs->state = do_transfer;
		fs->retries = 0;

		act(fs);
	}
}
Example #14
/*
 * This function uses blk_fetch_request() to walk through each request
 * (struct request *req) on the queue (struct request_queue *q).  It first
 * checks whether the request exceeds the capacity of our block device and
 * then handles it according to the transfer direction given by
 * rq_data_dir(req).  Since the backing store is just a simple array, the
 * request handling boils down to two memcpy() calls.
 */
static void simp_blkdev_do_request(struct request_queue* q) {
    struct request* req;

    while ((req = blk_fetch_request(q)) != NULL) {
        if ((blk_rq_pos(req) + blk_rq_cur_sectors(req)) << 9 > SIMP_BLKDEV_BYTES) {
            printk(KERN_ERR SIMP_BLKDEV_DISKNAME ": bad request: block=%llu, count=%u\n",
                   (unsigned long long)blk_rq_pos(req), blk_rq_cur_sectors(req));
            /* blk_rq_pos(): first sector of the request,
             * blk_rq_cur_sectors(): sectors in the current segment */
            __blk_end_request_all(req, -EIO); /* fail the whole request; the queue
                                               * lock is already held, so use the
                                               * __blk_* variant */
            continue;
        }

        switch (rq_data_dir(req)) {
        case READ:
            /* copy data from the block device into req->buffer */
            memcpy(req->buffer, simp_blkdev_data + (blk_rq_pos(req) << 9),
                   blk_rq_cur_sectors(req) << 9);
            __blk_end_request_all(req, 0);
            break;

        case WRITE:
            /* copy data from req->buffer into the block device */
            memcpy(simp_blkdev_data + (blk_rq_pos(req) << 9), req->buffer,
                   blk_rq_cur_sectors(req) << 9);
            __blk_end_request_all(req, 0);
            break;

        default:
            /* No default needed because rq_data_dir(req) is 1 bit */
            break;
        }
    }
}
Example #15
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
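mmc_queue_thread() above is only the consumer half of the MMC pattern: the queue's request function does no I/O itself, it merely wakes this thread (or drains the queue when the device has gone away). A sketch of that companion request function, in the spirit of drivers/mmc/card/queue.c from the same kernel era, is shown below; treat the exact error handling as an approximation.
/*
 * Generic MMC request handler.  The real work is done by the dedicated
 * mmc_queue_thread(); here we only wake it, or fail every queued request
 * if the queue no longer has a struct mmc_queue behind it.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		/* Device went away: fail all queued requests. */
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}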
Example #16
static void sbd_request_func(struct request_queue *q)
{
	struct request *req;

	while((req = blk_fetch_request(q)) != NULL)
	{
		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");

			__blk_end_request_cur(req, -EIO);
			continue;
		}

		if (((blk_rq_pos(req) << 9) + blk_rq_bytes(req)) > SBD_BYTES) {
			printk (KERN_INFO "out of disk boundary\n");

			__blk_end_request_cur(req, -EIO);
			break;
		}

		printk (KERN_INFO "%s, rq_pos << 9 = %lu, rq_bytes = %lu\n",
			(rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
			(unsigned long)(blk_rq_pos(req) << 9),
			(unsigned long)blk_rq_bytes(req));

		if(rq_data_dir(req) == WRITE)
			memcpy(sbd_data + (blk_rq_pos(req) << 9), req->buffer,
				blk_rq_bytes(req));
		else
			memcpy(req->buffer, sbd_data + (blk_rq_pos(req) << 9),
				blk_rq_bytes(req));

		__blk_end_request_cur(req, 0);
	}
}
Example #17
static void htifblk_request(struct request_queue *q)
{
	struct htifblk_device *dev;
	struct request *req;
	unsigned long flags;
	int ret;

	dev = q->queuedata;
	spin_lock_irqsave(q->queue_lock, flags);
	if (dev->req != NULL)
		goto out;

	while ((req = blk_fetch_request(q)) != NULL) {
		if (req->cmd_type == REQ_TYPE_FS) {
			ret = htifblk_segment(dev, req);
			if (unlikely(ret)) {
				WARN_ON(__blk_end_request_cur(req, ret));
				continue;
			}
			blk_stop_queue(q);
			break;
		} else {
			blk_dump_rq_flags(req, DRIVER_NAME
				": ignored non-fs request");
			__blk_end_request_all(req, -EIO);
			continue;
		}
	}
out:
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/**ltl
 * Function:
 * Parameters:
 * Return value:
 * Note: this function must not call blk_end_request_all(), because
 *       blk_end_request_all() takes the request queue lock and would deadlock here.
 *       Q: why does calling blk_end_request_all() deadlock?
 *       A: the request function is entered with q->queue_lock already held, and
 *          blk_end_request_all() tries to acquire that same lock again; the
 *          __blk_end_request_* variants expect the caller to hold the lock.
 */
static void mem_block_requeut_fn(struct request_queue* q)
{
	struct request* req = NULL;
	while (NULL != (req = blk_fetch_request(q)))
	{
		if(blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(req->rq_disk))
		{
			__blk_end_request_all(req, -EIO); /* must not be replaced with blk_end_request_all() */
			continue;
		}

		switch(rq_data_dir(req))
		{
			case READ:
                {
				    memcpy(req->buffer, g_mem_buf + (blk_rq_pos(req) << 9), blk_rq_cur_bytes(req));
				    __blk_end_request_all(req, 0); /* must not be replaced with blk_end_request_all() */
				    break;
                }
			case WRITE:
                {
				    memcpy(g_mem_buf + (blk_rq_pos(req) << 9), req->buffer, blk_rq_cur_bytes(req));
				    __blk_end_request_all(req, 0); /* must not be replaced with blk_end_request_all() */
				    break;
                }
			default:
				__blk_end_request_all(req, -EIO); /* must not be replaced with blk_end_request_all() */
				break;
		}
	}
  //  BLK_PLog("req:0x%p",req);
}
void bsg_request_fn(struct request_queue *q)
{
	struct device *dev = q->queuedata;
	struct request *req;
	struct bsg_job *job;
	int ret;

	if (!get_device(dev))
		return;

	while (1) {
		req = blk_fetch_request(q);
		if (!req)
			break;
		spin_unlock_irq(q->queue_lock);

		ret = bsg_create_job(dev, req);
		if (ret) {
			req->errors = ret;
			blk_end_request_all(req, ret);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		job = req->special;
		ret = q->bsg_job_fn(job);
		spin_lock_irq(q->queue_lock);
		if (ret)
			break;
	}

	spin_unlock_irq(q->queue_lock);
	put_device(dev);
	spin_lock_irq(q->queue_lock);
}
Example #20
/*
 * This function picks a request from the given queue (q) and hands it to
 * sbd_transfer() for processing.
 * A request can consist of several segments.  Here each segment of the
 * request is handled in turn until the whole request has been completed.
 */
static void sbd_request(struct request_queue *q) {
	struct request *req; /* current request */

	req = blk_fetch_request(q); /* pick the next request off the queue */
	while (req != NULL) { /* as long as the queue is not empty */
		if (req->cmd_type != REQ_TYPE_FS) { /* not a filesystem (read/write) request */
			printk (KERN_NOTICE "Skip non-CMD request\n"); /* log that the request is skipped */
			__blk_end_request_all(req, -EIO); /* fail the whole request */
			req = blk_fetch_request(q); /* fetch the next request before looping */
			continue;
		}
		sbd_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
				req->buffer, rq_data_dir(req)); /* carry out the transfer */
		if ( ! __blk_end_request_cur(req, 0) ) { /* if the request is now fully processed */
			req = blk_fetch_request(q); /* fetch the next request from the queue */
		}
	}
}
Example #21
static int stheno_request_thread( void *arg )
{
    struct request *req;
    int ret;

    while( 1 ){
        ret = wait_event_interruptible( stheno_wait_q, (kthread_should_stop() || stheno_wakeup == 1) );
        if( ret != 0 ) break;

        stheno_wakeup = 0;

        if( kthread_should_stop() ) break;

        while( 1 ){
            spin_lock_irq( stheno_queue->queue_lock );
            req = blk_fetch_request( stheno_queue );
            spin_unlock_irq( stheno_queue->queue_lock );
        next_segment:
            if( req == NULL ) break;

            if( !blk_fs_request( req ) ){
                /*blk_end_request_cur( req, -EIO );*/
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            if( stheno_read_sector0() != 0 ){
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            if( blk_rq_sectors( req ) == 0 || blk_rq_cur_sectors( req ) == 0 ){
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            if( rq_data_dir( req ) == 0 ){
                ret = euryale_read_process( stheno_lbaoffset + blk_rq_pos( req ), blk_rq_cur_sectors( req ), req->buffer );
            }else{
                ret = euryale_write_process( stheno_lbaoffset + blk_rq_pos( req ), blk_rq_cur_sectors( req ), req->buffer );
            }
            /*blk_end_request_cur( req, ret == 0 ? 0 : -EIO );*/
            spin_lock_irq( stheno_queue->queue_lock );
            ret = __blk_end_request_cur( req, ret == 0 ? 0 : -EIO );
            spin_unlock_irq( stheno_queue->queue_lock );
            if( ret == true ) goto next_segment;
        }
    }
    print_debug("stheno_request_thread was terminated.\n");
    return 0;
}
Example #22
/*
 * Common request path.  Rather than registering a custom make_request()
 * function we use the generic Linux version.  This is done because it allows
 * us to easily merge read requests which would otherwise be performed
 * synchronously by the DMU.  This is less critical in the write case where the
 * DMU will perform the correct merging within a transaction group.  Using
 * the generic make_request() also lets us leverage the fact that the
 * elevator will ensure correct ordering with regard to barrier IOs.  On
 * the downside it means that in the write case we end up doing request
 * merging twice: once in the elevator and once in the DMU.
 *
 * The request handler is called under a spin lock so all the real work
 * is handed off to be done in the context of the zvol taskq.  This function
 * simply performs basic request sanity checking and hands off the request.
 */
static void
zvol_request(struct request_queue *q)
{
	zvol_state_t *zv = q->queuedata;
	struct request *req;
	unsigned int size;

	while ((req = blk_fetch_request(q)) != NULL) {
		size = blk_rq_bytes(req);

		if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
		    get_capacity(zv->zv_disk)) {
			printk(KERN_INFO
			       "%s: bad access: block=%llu, count=%lu\n",
			       req->rq_disk->disk_name,
			       (long long unsigned)blk_rq_pos(req),
			       (long unsigned)blk_rq_sectors(req));
			__blk_end_request(req, -EIO, size);
			continue;
		}

		if (!blk_fs_request(req)) {
			printk(KERN_INFO "%s: non-fs cmd\n",
			       req->rq_disk->disk_name);
			__blk_end_request(req, -EIO, size);
			continue;
		}

		switch (rq_data_dir(req)) {
		case READ:
			zvol_dispatch(zvol_read, req);
			break;
		case WRITE:
			if (unlikely(get_disk_ro(zv->zv_disk)) ||
			    unlikely(zv->zv_flags & ZVOL_RDONLY)) {
				__blk_end_request(req, -EROFS, size);
				break;
			}

#ifdef HAVE_BLK_QUEUE_DISCARD
			if (req->cmd_flags & VDEV_REQ_DISCARD) {
				zvol_dispatch(zvol_discard, req);
				break;
			}
#endif /* HAVE_BLK_QUEUE_DISCARD */

			zvol_dispatch(zvol_write, req);
			break;
		default:
			printk(KERN_INFO "%s: unknown cmd: %d\n",
			       req->rq_disk->disk_name, (int)rq_data_dir(req));
			__blk_end_request(req, -EIO, size);
			break;
		}
	}
}
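The comment above notes that the real work is handed off to the zvol taskq; zvol_dispatch() itself is not shown on this page. In the ZFS-on-Linux code of that period it is little more than a taskq_dispatch() call with a fallback error path, roughly as sketched here; the taskq name and dispatch flags are assumptions.
/*
 * Hand a request off to the zvol taskq for asynchronous processing;
 * if no task can be allocated, fail the whole request immediately.
 */
static void
zvol_dispatch(task_func_t func, struct request *req)
{
	if (!taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
		__blk_end_request(req, -EIO, blk_rq_bytes(req));
}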
Example #23
File: vbd.c Project: truncs/vbd
/*
 * Service each request in the queue. If the request
 * is not a REQ_TYPE_FS type then just skip the request
 * notifying that it is skipping this request.
 */
static void vbd_request(struct request_queue * q) {
  struct request *req;
  req = blk_fetch_request(q);

  while(req != NULL) {

	/* This should not happen normally but just in case */
	if(req->cmd_type != REQ_TYPE_FS) {
	  printk(KERN_NOTICE "Skip non fs type request\n");
	  __blk_end_request_all(req, -EIO);
	  req = blk_fetch_request(q);
	  continue;
	}

	vbd_tx(&device,blk_rq_pos(req), blk_rq_cur_sectors(req),
		   req->buffer, rq_data_dir(req));
	if(!__blk_end_request_cur(req, 0))
	  req = blk_fetch_request(q);
  }
}
Example #24
static void my_request(struct request_queue *q)
{
	struct request *rq;
	int size, res = 0;
	char *ptr;
	unsigned nr_sectors, sector;
	printk(KERN_INFO "entering request routine\n");

	rq = blk_fetch_request(q);
	while (rq) {
		res = 0;
		if (!blk_fs_request(rq)) {
			printk(KERN_WARNING
			       "This was not a normal fs request, skipping\n");
			res = -EIO;
			goto done;
		}
		nr_sectors = blk_rq_cur_sectors(rq);
		sector = blk_rq_pos(rq);

		ptr = ramdisk + sector * sector_size;
		size = nr_sectors * sector_size;

		if ((ptr + size) > (ramdisk + disk_size)) {
			printk(KERN_WARNING
			       " tried to go past end of device\n");
			res = -EIO;
			goto done;
		}

		if (rq_data_dir(rq)) {
			printk(KERN_INFO "writing at sector %d, %u sectors \n",
			       sector, nr_sectors);
			memcpy(ptr, rq->buffer, size);
		} else {
			printk(KERN_INFO "reading at sector %d, %u sectors \n",
			       sector, nr_sectors);
			memcpy(rq->buffer, ptr, size);
		}
	      done:
		if (!__blk_end_request_cur(rq, res))
			rq = blk_fetch_request(q);
	}
	printk(KERN_INFO "leaving request\n");
}
Example #25
static void sb_request(struct request_queue *q) {
	struct request *req;
	int error;

	req = blk_fetch_request(q);
	while (req != NULL) {
		/* Check request type */
		if (req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(req, -EIO);
			req = blk_fetch_request(q);
			continue;
		}
		/* Do transfer */
		error = sb_transfer(sbd, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));
		if (!__blk_end_request_cur(req, error ? -EIO : 0) ) {
			req = blk_fetch_request(q);
		}
	}

	return;
}
Example #26
static void ndas_request(struct request_queue *q)
{
	struct request *req;

	func();

	while ((req = blk_fetch_request(q)) != NULL) {
		printk(KERN_INFO "request received\n");
		__blk_end_request_all(req, 0);
	}
}
Example #27
static void looper_request(struct request_queue *q) {
 
  struct request *req;
 
  printk(KERN_INFO "looper: executing request");
   
  req = blk_fetch_request(q);
  while (req != NULL) {
    if (req->cmd_type != REQ_TYPE_FS) {
      printk (KERN_NOTICE "Skip non-CMD request\n");
      __blk_end_request_all(req, -EIO);
      req = blk_fetch_request(q);
      continue;
    }
    looper_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
		 req->buffer, rq_data_dir(req));
    if ( ! __blk_end_request_cur(req, 0) ) {
      req = blk_fetch_request(q);
    }
  }
}
Example #28
/*
 * Simply used for requesting a transfer (read or write) of
 * data from the RAM disk.
 */
static void osurd_request(struct request_queue *q)
{
    struct request *req;
    req = blk_fetch_request(q);

    while(req != NULL) {
        struct osurd_dev *dev = req->rq_disk->private_data;
        if(req->cmd_type != REQ_TYPE_FS) {
            printk(KERN_NOTICE "Skip non-fs request\n");
            __blk_end_request_all(req, -EIO);
            req = blk_fetch_request(q);
            continue;
        }
        osurd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
                       req->buffer, rq_data_dir(req));

        if(!__blk_end_request_cur(req, 0)) {
            req = blk_fetch_request(q);
        }
    }
}
Example #29
File: sbd.c Project: OSLL/ioperf
static void sbd_request(struct request_queue *q) {
    struct request *req;

    req = blk_fetch_request(q);
    while (req != NULL) {
        // blk_fs_request() was removed in 2.6.36 - many thanks to
        // Christian Paro for the heads up and fix...
        //if (!blk_fs_request(req)) {
        if (req->cmd_type != REQ_TYPE_FS) {
            printk (KERN_NOTICE "Skip non-CMD request\n");
            __blk_end_request_all(req, -EIO);
            req = blk_fetch_request(q);
            continue;
        }
        sbd_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
                req->buffer, rq_data_dir(req));
        if ( ! __blk_end_request_cur(req, 0) ) {
            req = blk_fetch_request(q);
        }
    }
}
Example #30
static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}