Example #1
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;
	int ret;

	req = blk_fetch_request(q);
	while (req) {
		struct sbull_dev *dev = req->rq_disk->private_data;
		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			ret = -EIO;
			goto done;
		}
		printk (KERN_NOTICE "Req dev %u dir %d sec %ld, nr %d\n",
			(unsigned)(dev - Devices), rq_data_dir(req),
			blk_rq_pos(req), blk_rq_cur_sectors(req));
		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
				req->buffer, rq_data_dir(req));
		ret = 0;
	done:
		if(!__blk_end_request_cur(req, ret)){
			req = blk_fetch_request(q);
		}
	}
}
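For context, a request function like the one in Example #1 only runs after it has been registered with the block layer. The sketch below shows one plausible way such a driver could be wired up with the legacy single-queue API; it is not taken from any of the examples, and names such as setup_device, SBULL_MINORS, NSECTORS, HARDSECT_SIZE, sbull_major and sbull_ops are assumptions, while blk_init_queue(), blk_queue_logical_block_size(), alloc_disk(), set_capacity() and add_disk() are the real pre-blk-mq kernel interfaces.

/*
 * Minimal sketch (assumed names, not the author's code): registering the
 * request function with the legacy single-queue block layer.
 */
static void setup_device(struct sbull_dev *dev, int which)
{
	spin_lock_init(&dev->lock);

	/* The request function is invoked with dev->lock held, which is why
	 * the examples here complete requests with the __blk_end_* variants. */
	dev->queue = blk_init_queue(sbull_request, &dev->lock);
	if (dev->queue == NULL)
		return;
	blk_queue_logical_block_size(dev->queue, HARDSECT_SIZE);
	dev->queue->queuedata = dev;

	dev->gd = alloc_disk(SBULL_MINORS);
	if (!dev->gd)
		return;
	dev->gd->major = sbull_major;
	dev->gd->first_minor = which * SBULL_MINORS;
	dev->gd->fops = &sbull_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf(dev->gd->disk_name, 32, "sbull%c", which + 'a');
	set_capacity(dev->gd, NSECTORS * (HARDSECT_SIZE / 512));
	add_disk(dev->gd);
}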
Example #2
/*
 * The function uses blk_fetch_request() to walk the requests (struct request *req)
 * queued on struct request_queue *q.  For each one it first checks whether the
 * request goes beyond the capacity of our block device, then handles it according
 * to its direction, rq_data_dir(req).  Since the backing store is just a simple
 * array, handling a request is nothing more than two memcpy() paths.
 */
static void simp_blkdev_do_request(struct request_queue* q) {
    struct request* req;

    while ((req = blk_fetch_request(q)) != NULL) {
        if ((blk_rq_pos(req) + blk_rq_cur_sectors(req)) << 9  > SIMP_BLKDEV_BYTES) {
            printk(KERN_ERR SIMP_BLKDEV_DISKNAME": bad request: block=%llu, count=%u\n",
                   (unsigned long long)blk_rq_pos(req), blk_rq_cur_sectors(req));
            /* blk_rq_pos(): starting sector of the request;
             * blk_rq_cur_sectors(): sector count of the current segment. */

            /* Fail the whole request.  The __ variant is used because the
             * request function runs with the queue lock held; the error
             * argument is 0 on success or a negative errno on failure
             * (the legacy end_request() used 1/0 instead). */
            __blk_end_request_all(req, -EIO);
            continue;
        }

        switch (rq_data_dir(req)) {
        case READ:
            /* Copy the data from the block device (the array) into req->buffer. */
            memcpy(req->buffer, simp_blkdev_data + (blk_rq_pos(req) << 9),
                   blk_rq_cur_sectors(req) << 9);
            __blk_end_request_all(req, 0);
            break;

        case WRITE:
            /* Copy the data from req->buffer into the block device (the array). */
            memcpy(simp_blkdev_data + (blk_rq_pos(req) << 9), req->buffer,
                   blk_rq_cur_sectors(req) << 9);
            __blk_end_request_all(req, 0);
            break;

        default:
            /* Unreachable: rq_data_dir() returns only READ or WRITE */
            break;
        }
    }
}
Example #3
static int stheno_request_thread( void *arg )
{
    struct request *req;
    int ret;

    while( 1 ){
        ret = wait_event_interruptible( stheno_wait_q, (kthread_should_stop() || stheno_wakeup == 1) );
        if( ret != 0 ) break;

        stheno_wakeup = 0;

        if( kthread_should_stop() ) break;

        while( 1 ){
            spin_lock_irq( stheno_queue->queue_lock );
            req = blk_fetch_request( stheno_queue );
            spin_unlock_irq( stheno_queue->queue_lock );
        next_segment:
            if( req == NULL ) break;

            if( !blk_fs_request( req ) ){
                /*blk_end_request_cur( req, -EIO );*/
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            if( stheno_read_sector0() != 0 ){
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            if( blk_rq_sectors( req ) == 0 || blk_rq_cur_sectors( req ) == 0 ){
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            if( rq_data_dir( req ) == 0 ){
                ret = euryale_read_process( stheno_lbaoffset + blk_rq_pos( req ), blk_rq_cur_sectors( req ), req->buffer );
            }else{
                ret = euryale_write_process( stheno_lbaoffset + blk_rq_pos( req ), blk_rq_cur_sectors( req ), req->buffer );
            }
            /*blk_end_request_cur( req, ret == 0 ? 0 : -EIO );*/
            spin_lock_irq( stheno_queue->queue_lock );
            ret = __blk_end_request_cur( req, ret == 0 ? 0 : -EIO );
            spin_unlock_irq( stheno_queue->queue_lock );
            if( ret == true ) goto next_segment;
        }
    }
    print_debug("stheno_request_thread was terminated.\n");
    return 0;
}
Example #4
static inline void setup_transfer(struct floppy_state *fs)
{
	int n;
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_cmd *cp = fs->dma_cmd;
	struct dbdma_regs __iomem *dr = fs->dma;
	struct request *req = fs->cur_req;

	if (blk_rq_cur_sectors(req) <= 0) {
		swim3_warn("%s", "Transfer 0 sectors ?\n");
		return;
	}
	if (rq_data_dir(req) == WRITE)
		n = 1;
	else {
		n = fs->secpertrack - fs->req_sector + 1;
		if (n > blk_rq_cur_sectors(req))
			n = blk_rq_cur_sectors(req);
	}

	swim3_dbg("  setup xfer at sect %d (of %d) head %d for %d\n",
		  fs->req_sector, fs->secpertrack, fs->head, n);

	fs->scount = n;
	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
	out_8(&sw->sector, fs->req_sector);
	out_8(&sw->nsect, n);
	out_8(&sw->gap3, 0);
	out_le32(&dr->cmdptr, virt_to_bus(cp));
	if (rq_data_dir(req) == WRITE) {
		/* Set up 3 dma commands: write preamble, data, postamble */
		init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
		++cp;
		init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512);
		++cp;
		init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
	} else {
		init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512);
	}
	++cp;
	out_le16(&cp->command, DBDMA_STOP);
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	in_8(&sw->error);
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	if (rq_data_dir(req) == WRITE)
		out_8(&sw->control_bis, WRITE_SECTORS);
	in_8(&sw->intr);
	out_le32(&dr->control, (RUN << 16) | RUN);
	/* enable intr when transfer complete */
	out_8(&sw->intr_enable, TRANSFER_DONE);
	out_8(&sw->control_bis, DO_ACTION);
	set_timeout(fs, 2*HZ, xfer_timeout);	/* enable timeout */
}
Example #5
static void start_request(struct floppy_state *fs)
{
	struct request *req;
	unsigned long x;

	if (fs->state == idle && fs->wanted) {
		fs->state = available;
		wake_up(&fs->wait);
		return;
	}
	while (fs->state == idle) {
		if (!fd_req) {
			fd_req = blk_fetch_request(swim3_queue);
			if (!fd_req)
				break;
		}
		req = fd_req;
#if 0
		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
		       req->rq_disk->disk_name, req->cmd,
		       (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
		printk("           errors=%d current_nr_sectors=%u\n",
		       req->errors, blk_rq_cur_sectors(req));
#endif

		if (blk_rq_pos(req) >= fs->total_secs) {
			swim3_end_request_cur(-EIO);
			continue;
		}
		if (fs->ejected) {
			swim3_end_request_cur(-EIO);
			continue;
		}

		if (rq_data_dir(req) == WRITE) {
			if (fs->write_prot < 0)
				fs->write_prot = swim3_readbit(fs, WRITE_PROT);
			if (fs->write_prot) {
				swim3_end_request_cur(-EIO);
				continue;
			}
		}

		/* Do not remove the cast. blk_rq_pos(req) is now a
		 * sector_t and can be 64 bits, but it will never go
		 * past 32 bits for this driver anyway, so we can
		 * safely cast it down and not have to do a 64/32
		 * division
		 */
		fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
		x = ((long)blk_rq_pos(req)) % fs->secpercyl;
		fs->head = x / fs->secpertrack;
		fs->req_sector = x % fs->secpertrack + 1;
		fd_req = req;
		fs->state = do_transfer;
		fs->retries = 0;

		act(fs);
	}
}
/**ltl
 * Function:
 * Parameters:
 * Return value:
 * Note: this function must not call blk_end_request_all(), because blk_end_request_all()
 *       takes the request queue lock itself, so calling it here would deadlock.
 *       Q: why exactly would calling blk_end_request_all() cause a deadlock?
 */
static void mem_block_requeut_fn(struct request_queue* q)
{
	struct request* req = NULL;
	while(NULL != (req = blk_fetch_request(q)))
	{
		if(blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(req->rq_disk))
		{
			__blk_end_request_all(req,-EIO); /* must not be replaced with blk_end_request_all() */
			continue;
		}

		switch(rq_data_dir(req))
		{
			case READ:
                {
				    memcpy(req->buffer,g_mem_buf + (blk_rq_pos(req) << 9),blk_rq_cur_bytes(req));
				    __blk_end_request_all(req,0); /* must not be replaced with blk_end_request_all() */
				    break;
                }
			case WRITE:
                {
				    memcpy(g_mem_buf + (blk_rq_pos(req) << 9),req->buffer,blk_rq_cur_bytes(req));
				    __blk_end_request_all(req,0); /* must not be replaced with blk_end_request_all() */
				    break;
                }
			default:
				__blk_end_request_all(req,-EIO); /* must not be replaced with blk_end_request_all() */
				break;
		}
	}
  //  BLK_PLog("req:0x%p",req);
}
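To answer the question left open in the comment above: with the legacy single-queue API the request function is called by the block layer with q->queue_lock already held, and blk_end_request_all() re-acquires that same non-recursive spinlock internally, so the thread would spin on a lock it already owns. The fragment below is only an illustration of that rule (my_request_fn is a made-up name), not code from this driver.

/* Illustration: why only the __blk_end_* variants are safe inside a
 * request function registered via blk_init_queue(). */
static void my_request_fn(struct request_queue *q)
{
	/* Entered with q->queue_lock held by the block layer. */
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		/* Safe: __blk_end_request_all() assumes the caller already
		 * holds the queue lock. */
		__blk_end_request_all(req, 0);

		/* Not safe here: blk_end_request_all(req, 0) would do
		 * spin_lock_irqsave(q->queue_lock, ...) and deadlock on the
		 * lock this thread is already holding. */
	}
}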
static int htifblk_segment(struct htifblk_device *dev,
	struct request *req)
{
	static struct htifblk_request pkt __aligned(HTIF_ALIGN);
	u64 offset, size, end;

	offset = (blk_rq_pos(req) << SECTOR_SIZE_SHIFT);
	size = (blk_rq_cur_sectors(req) << SECTOR_SIZE_SHIFT);

	end = offset + size;
	if (unlikely(end < offset || end > dev->size)) {
		dev_err(&dev->dev->dev, "out-of-bounds access:"
			" offset=%llu size=%llu\n", offset, size);
		return -EINVAL;
	}

	rmb();
	pkt.addr = __pa(bio_data(req->bio));
	pkt.offset = offset;
	pkt.size = size;
	pkt.tag = dev->tag;

	dev->req = req;
	dev->msg_buf.dev = dev->dev->index;
	dev->msg_buf.cmd = (rq_data_dir(req) == READ) ?
		HTIF_CMD_READ : HTIF_CMD_WRITE;
	dev->msg_buf.data = __pa(&pkt);
	htif_tohost(&dev->msg_buf);
	return 0;
}
Example #8
static void do_z2_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req) {
		unsigned long start = blk_rq_pos(req) << 9;
		unsigned long len  = blk_rq_cur_bytes(req);
		int err = 0;

		if (start + len > z2ram_size) {
			printk( KERN_ERR DEVICE_NAME ": bad access: block=%llu, count=%u\n",
				(unsigned long long)blk_rq_pos(req), blk_rq_cur_sectors(req));
			err = -EIO;
			goto done;
		}
		while (len) {
			unsigned long addr = start & Z2RAM_CHUNKMASK;
			unsigned long size = Z2RAM_CHUNKSIZE - addr;
			if (len < size)
				size = len;
			addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
			if (rq_data_dir(req) == READ)
				memcpy(req->buffer, (char *)addr, size);
			else
				memcpy((char *)addr, req->buffer, size);
			start += size;
			len -= size;
		}
	done:
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
/*
 * Verifies if a request should be dispatched or not.
 *
 * Returns:
 *  <0 in case of error.
 *  0  if request passes the checks
 */
static int sd_check_request(struct sd_host *host, struct request *req)
{
	unsigned long nr_sectors;

	if (req->cmd_type != REQ_TYPE_FS)
		return -EIO;

	if (test_bit(__SD_MEDIA_CHANGED, &host->flags)) {
		sd_printk(KERN_ERR, "media changed, aborting\n");
		return -ENOMEDIUM;
	}

	/* unit is kernel sectors */
	nr_sectors =
	    host->card.csd.capacity << (host->card.csd.read_blkbits - KERNEL_SECTOR_SHIFT);

	/* keep our reads within limits */

	if ((blk_rq_pos(req) + blk_rq_cur_sectors(req)) > nr_sectors) {
		sd_printk(KERN_ERR, "reading past end, aborting\n");
		return -EINVAL;
	} 

	return 0;
}
/*
 * Performs a write request for SDHC.
 */
static int sdhc_write_request(struct sd_host *host, struct request *req)
{
	int i;
	unsigned long nr_blocks; /* in card blocks */
	size_t block_len; /* in bytes */
	unsigned long start;
	void *buf = req->buffer;
	int retval;

	/* FIXME?, maybe should use 2^WRITE_BL_LEN blocks */

	/* kernel sectors and card write blocks are both 512 bytes long */
	start = blk_rq_pos(req);
	nr_blocks = blk_rq_cur_sectors(req);
	block_len = 1 << KERNEL_SECTOR_SHIFT;

	for (i = 0; i < nr_blocks; i++) {
		retval = sd_write_single_block(host, start, buf, block_len);
		if (retval < 0)
			break;

		start ++;
		buf += block_len;
	}

	/* number of kernel sectors transferred */
	retval = i;

	return retval;
}
/*
 * Performs a read request for SDHC.
 */
static int sdhc_read_request(struct sd_host *host, struct request *req)
{
	int i;
	unsigned long nr_blocks; /* in card blocks */
	size_t block_len; /* in bytes */
	unsigned long start;
	void *buf = req->buffer;
	int retval;

	start = blk_rq_pos(req);

	nr_blocks = blk_rq_cur_sectors(req);
	block_len = 1 << KERNEL_SECTOR_SHIFT;

	for (i = 0; i < nr_blocks; i++) {
		retval = sd_read_single_block(host, start, buf, block_len);
		if (retval < 0)
			break;

		start ++;
		buf += block_len;
	}

	/* number of kernel sectors transferred */
	retval = i;

	return retval;
}
Example #12
static void simp_blkdev_do_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req) {
		unsigned long start;
		unsigned long len;
		int err = 0;

		start = blk_rq_pos(req) << 9;
		len = blk_rq_cur_sectors(req) << 9;
		if (start + len > SIMP_BLKDEV_BYTES) {
			printk(KERN_ERR SIMP_BLKDEV_DISKNAME ": bad access: block=%llu, count=%u\n",
			       (unsigned long long)blk_rq_pos(req), blk_rq_cur_sectors(req));
			err = -EIO;
			goto done;
		}
		if (rq_data_dir(req) == READ)
			memcpy(req->buffer, simp_blkdev_data + start, len);
		else
			memcpy(simp_blkdev_data + start, req->buffer, len);
done:
		/* Only fetch the next request once the current one has been
		 * fully completed. */
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
Example #13
static void cf_request(struct request_queue *q)
{
	struct cf_device *cf;
	struct request *req;
	unsigned block, count;
	int rw, err;

	DPRINTK(DEBUG_CF_REQUEST, "%s: q %p", __FUNCTION__, q);

	req = blk_fetch_request(q);
	while (req) {
		err = -EIO;
		DPRINTK(DEBUG_CF_REQUEST, "%s:%d req %p", __FUNCTION__, __LINE__, req);

		if (!blk_fs_request(req))
			goto done;

		

		block = blk_rq_pos(req);
		count = blk_rq_cur_sectors(req);
		rw = rq_data_dir(req);
		cf = req->rq_disk->private_data;

		DPRINTK(DEBUG_CF_REQUEST, "req %p block %d count %d rw %c\n", req, block, count, (rw == READ)?'R':'W');

		if (block+count > get_capacity(req->rq_disk)) {
			printk("%s: %u+%u is larger than %llu\n", __FUNCTION__, block, count, get_capacity(req->rq_disk));
			goto done;
		}

		/* Grab the R/W semaphore to prevent more than
		 * one request from trying to R/W at the same time */
		err = down_interruptible(&cf->rw_sem);
		if (err)
			break;

		if (rw == READ)
			err = cf_read_sectors(cf, req->buffer, block, count);
		else
			err = cf_write_sectors(cf, req->buffer, block, count);
		up(&cf->rw_sem);

	done:
		DPRINTK(DEBUG_CF_REQUEST, "%s: blk_end_request_cur(%p, %d)\n", __FUNCTION__, req, err);
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
	DPRINTK(DEBUG_CF_REQUEST, "end\n");
	cf_in_request--;
}
Example #14
void block_request(struct request_queue *q)
{
  struct request *req;
  unsigned long offset, nbytes;

  req = blk_fetch_request(q);
  while (req != NULL) {
    // Stop looping once we've exhausted the queue.
    // The kernel will call this function whenever
    // there is at least one element in the queue.

    // Check if we support handling this request.
    if (req->cmd_type != REQ_TYPE_FS) {
      // We only handle filesystem (read/write) requests; complete
      // anything else with an I/O error.  The __ variant is used
      // because the request function is entered with the queue lock
      // already held, and blk_end_request_all() would try to take it
      // again.
      __blk_end_request_all(req, -EIO);
      // Fetch the next request, otherwise we would keep looping on
      // the request we just completed.
      req = blk_fetch_request(q);
      continue;
    }
    
    // Handle the request.

    // Byte offset of the first sector of the current chunk.
    offset = blk_rq_pos(req) * LOGICAL_BLOCK_SIZE;
    // Size of the current chunk in bytes.
    nbytes = blk_rq_cur_sectors(req) * LOGICAL_BLOCK_SIZE;

    if (rq_data_dir(req)) {
      // Check that the write won't exceed the size of the block device.
      if ((offset + nbytes) <= size) {
	// Do write.
	memcpy(data + offset, req->buffer, nbytes);
      }
    } else {
      // Do read.
      memcpy(req->buffer, data + offset, nbytes);
    }

    // Complete the current chunk.  If more chunks of this request
    // still need to be handled, __blk_end_request_cur returns true
    // and we keep working on the same req.
    if (!__blk_end_request_cur(req, 0)) {
      // Otherwise, pop a new request off the queue.
      req = blk_fetch_request(q);
    }
  }
}
/*
 * Handle an I/O request.
 */
static void ramdisk_transfer(struct ramdisk_dev *dev, struct request *req)
{
    unsigned long sector = blk_rq_pos(req);
    unsigned long nsect = blk_rq_cur_sectors(req);
    char* buffer = bio_data(req->bio);
    int write = rq_data_dir(req);
    unsigned long offset = sector*KERNEL_SECTOR_SIZE;
    unsigned long nbytes = nsect*KERNEL_SECTOR_SIZE;

    struct req_data* rdata;
    struct ramdisk_message* msg;
    char* payload;
    int payload_length;

    struct msg_buffer* snd_buff;

    if ((offset + nbytes) > dev->size) {
        printk (KERN_NOTICE "Beyond-end write (%ld %ld)\n", offset, nbytes);
        return;
    }

    payload_length = nbytes * write;
    rdata = kmalloc(sizeof(struct req_data), GFP_KERNEL);
    rdata->dev = dev;
    rdata->req = req;
    rdata->buffer = buffer;
    msg = kmalloc(sizeof(struct ramdisk_message) + payload_length, GFP_KERNEL);
    msg->req_number = (unsigned long)rdata;
    msg->sector = sector;
    msg->nsect = nsect;
    msg->write = write;
    payload = ((void*)msg) + sizeof(struct ramdisk_message);

    /* Copy payload if it is a write request */
    if (write) {
        memcpy(payload, buffer, nbytes);
    }

    /* Create sending buffer */
    snd_buff = kmalloc(sizeof(struct msg_buffer), GFP_KERNEL);
    snd_buff->buffer = msg;
    snd_buff->length = sizeof(struct ramdisk_message) + payload_length;
    snd_buff->capacity = sizeof(struct ramdisk_message) + payload_length;
    snd_buff->release = drv_sent;
    snd_buff->channel = &chn;

    msg_channel_send(snd_buff);
}
Example #16
/*
 * This function picks a request from the given queue (q) and hands it
 * to sbd_transfer() for processing.
 * A request may be made up of several chunks.  In this function, each
 * chunk of the request is handled in turn until the whole request has
 * been completed.
 */
static void sbd_request(struct request_queue *q) {
	struct request *req; /* The request currently being handled */

	req = blk_fetch_request(q); /* Fetch a request from the queue */
	while (req != NULL) { /* Loop while the request queue is not empty */
		if (req->cmd_type != REQ_TYPE_FS) { /* Not a filesystem (read/write) request */
			printk (KERN_NOTICE "Skip non-CMD request\n"); /* Log that the request is being skipped */
			__blk_end_request_all(req, -EIO); /* Complete the whole request with an error */
			req = blk_fetch_request(q); /* Fetch the next request before looping again */
			continue;
		}
		sbd_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
				req->buffer, rq_data_dir(req)); /* Process the current chunk */
		if ( ! __blk_end_request_cur(req, 0) ) { /* If the request has now been fully completed */
			req = blk_fetch_request(q); /* Fetch the next request from the queue */
		}
	}
}
Example #17
static void sbull_request(struct request_queue*q)
{
	struct request *req;

	while((req = blk_fetch_request(q)) != NULL)
	{
		struct sbull_dev *dev = req->rq_disk->private_data;
		if(!blk_fs_request(req))
		{
			printk(KERN_NOTICE " Skip non-fs request\n");
			__blk_end_request_cur(req, 0);
			continue;
		}
		/* Transfer and complete one chunk at a time until the whole
		 * request has been finished (the error argument is 0 on
		 * success, not 1 as with the legacy end_request()). */
		do {
			sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));
		} while (__blk_end_request_cur(req, 0));
	}
}
Example #18
File: sbull.c Project: l3b2w1/ldd
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;

		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			__blk_end_request_all(req, -EIO);
			continue;
		}

		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
				req->buffer, rq_data_dir(req));
		__blk_end_request_all(req, 0);
	}
}
Example #19
File: vbd.c Project: truncs/vbd
/*
 * Service each request in the queue. If the request
 * is not a REQ_TYPE_FS type then just skip the request
 * notifying that it is skipping this request.
 */
static void vbd_request(struct request_queue * q) {
  struct request *req;
  req = blk_fetch_request(q);

  while(req != NULL) {

	/* This should not happen normally but just in case */
	if(req->cmd_type != REQ_TYPE_FS) {
	  printk(KERN_NOTICE "Skip non fs type request\n");
	  __blk_end_request_all(req, -EIO);
	  req = blk_fetch_request(q);
	  continue;
	}

	vbd_tx(&device,blk_rq_pos(req), blk_rq_cur_sectors(req),
		   req->buffer, rq_data_dir(req));
	if(!__blk_end_request_cur(req, 0))
	  req = blk_fetch_request(q);
  }
}
Example #20
/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (!blk_fs_request(req)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}
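Example #20 differs from most of the others in that it splits dequeueing into blk_peek_request() and blk_start_request(), so a request can be left on the queue (or requeued) when the ring is full. For reference, blk_fetch_request() used by the other examples combines those two steps; the helper below is only a sketch of that equivalence (fetch_request_equivalent is an invented name).

/* Sketch: blk_fetch_request() behaves like peeking at the head of the
 * queue and then starting (dequeueing) that request in one step. */
static struct request *fetch_request_equivalent(struct request_queue *q)
{
	struct request *req;

	req = blk_peek_request(q);	/* look at the head without dequeueing */
	if (req)
		blk_start_request(req);	/* dequeue it and start its timeout */
	return req;
}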
Example #21
/*
 * Performs a read request.
 */
static int sd_read_request(struct sd_host *host, struct request *req)
{
	int i;
	unsigned long nr_blocks; /* in card blocks */
	size_t block_len; /* in bytes */
	unsigned long start;
	void *buf = req->buffer;
	int retval;

	/*
	 * It seems that some cards do not accept single block reads for the
	 * read block length reported by the card.
	 * For now, we perform only 512 byte single block reads.
	 */

	start = blk_rq_pos(req) << KERNEL_SECTOR_SHIFT;
#if 0
	nr_blocks = req->current_nr_sectors >>
			 (host->card.csd.read_blkbits - KERNEL_SECTOR_SHIFT);
	block_len = 1 << host->card.csd.read_blkbits;
#else
	nr_blocks = blk_rq_cur_sectors(req);
	block_len = 1 << KERNEL_SECTOR_SHIFT;
#endif

	for (i = 0; i < nr_blocks; i++) {
		retval = sd_read_single_block(host, start, buf, block_len);
		if (retval < 0)
			break;

		start += block_len;
		buf += block_len;
	}

	/* number of kernel sectors transferred */
#if 0
	retval = i << (host->card.csd.read_blkbits - KERNEL_SECTOR_SHIFT);
#else
	retval = i;
#endif

	return retval;
}
Example #22
static void my_request(struct request_queue *q)
{
	struct request *rq;
	int size, res = 0;
	char *ptr;
	unsigned nr_sectors, sector;
	printk(KERN_INFO "entering request routine\n");

	rq = blk_fetch_request(q);
	while (rq) {
		res = 0;
		if (!blk_fs_request(rq)) {
			printk(KERN_WARNING
			       "This was not a normal fs request, skipping\n");
			res = -EIO;
			goto done;
		}
		nr_sectors = blk_rq_cur_sectors(rq);
		sector = blk_rq_pos(rq);

		ptr = ramdisk + sector * sector_size;
		size = nr_sectors * sector_size;

		if ((ptr + size) > (ramdisk + disk_size)) {
			printk(KERN_WARNING
			       " tried to go past end of device\n");
			res = -EIO;
			goto done;
		}

		if (rq_data_dir(rq)) {
			printk(KERN_INFO "writing at sector %d, %u sectors \n",
			       sector, nr_sectors);
			memcpy(ptr, rq->buffer, size);
		} else {
			printk(KERN_INFO "reading at sector %d, %u sectors \n",
			       sector, nr_sectors);
			memcpy(rq->buffer, ptr, size);
		}
	      done:
		if (!__blk_end_request_cur(rq, res))
			rq = blk_fetch_request(q);
	}
	printk(KERN_INFO "leaving request\n");
}
Example #23
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;
		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			__blk_end_request_all(req, -EIO);
			continue;
		}
    //    	printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
    //    			dev - Devices, rq_data_dir(req),
    //    			req->sector, req->current_nr_sectors,
    //    			req->flags);
		/* Transfer and complete one chunk at a time until the whole
		 * request has been finished. */
		do {
			sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
					req->buffer, rq_data_dir(req));
		} while (__blk_end_request_cur(req, 0));
	}
}
Example #24
static void looper_request(struct request_queue *q) {
 
  struct request *req;
 
  printk(KERN_INFO "looper: executing request");
   
  req = blk_fetch_request(q);
  while (req != NULL) {
    if (req->cmd_type != REQ_TYPE_FS) {
      printk (KERN_NOTICE "Skip non-CMD request\n");
      __blk_end_request_all(req, -EIO);
      req = blk_fetch_request(q);
      continue;
    }
    looper_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
		 req->buffer, rq_data_dir(req));
    if ( ! __blk_end_request_cur(req, 0) ) {
      req = blk_fetch_request(q);
    }
  }
}
Example #25
File: sbd.c Project: OSLL/ioperf
static void sbd_request(struct request_queue *q) {
    struct request *req;

    req = blk_fetch_request(q);
    while (req != NULL) {
        // blk_fs_request() was removed in 2.6.36 - many thanks to
        // Christian Paro for the heads up and fix...
        //if (!blk_fs_request(req)) {
        if (req->cmd_type != REQ_TYPE_FS) {
            printk (KERN_NOTICE "Skip non-CMD request\n");
            __blk_end_request_all(req, -EIO);
            req = blk_fetch_request(q);
            continue;
        }
        sbd_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
                req->buffer, rq_data_dir(req));
        if ( ! __blk_end_request_cur(req, 0) ) {
            req = blk_fetch_request(q);
        }
    }
}
Example #26
/*
 * Simply used for requesting a transfer (read or write) of
 * data from the RAM disk.
 */
static void osurd_request(struct request_queue *q)
{
    struct request *req;
    req = blk_fetch_request(q);

    while(req != NULL) {
        struct osurd_dev *dev = req->rq_disk->private_data;
        if(req->cmd_type != REQ_TYPE_FS) {
            printk(KERN_NOTICE "Skip non-fs request\n");
            __blk_end_request_all(req, -EIO);
            req = blk_fetch_request(q);
            continue;
        }
        osurd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
                       req->buffer, rq_data_dir(req));

        if(!__blk_end_request_cur(req, 0)) {
            req = blk_fetch_request(q);
        }
    }
}
Example #27
File: sb.c Project: hsnaves/jmips
static void sb_request(struct request_queue *q) {
	struct request *req;
	int error;

	req = blk_fetch_request(q);
	while (req != NULL) {
		/* Check request type; complete unsupported requests with an error */
		if (req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(req, -EIO);
			req = blk_fetch_request(q);
			continue;
		}
		/* Do transfer */
		error = sb_transfer(sbd, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));
		if (!__blk_end_request_cur(req, error ? -EIO : 0) ) {
			req = blk_fetch_request(q);
		}
	}

	return;
}
void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		printk(KERN_INFO "  cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
Example #29
static void sbd_request(struct request_queue *q) {
        struct request *req;
        unsigned long offset;
        unsigned long nbytes;

        req = blk_fetch_request(q);
        while (req != NULL) {
                if (req->cmd_type != REQ_TYPE_FS) {
                        printk (KERN_NOTICE "Skip non-CMD request\n");
                        __blk_end_request_all(req, -EIO);
                        req = blk_fetch_request(q);
                        continue;
                }
		offset = blk_rq_pos(req) * logical_block_size;
		nbytes = blk_rq_cur_sectors(req) * logical_block_size;

		operar_sector(&Device, offset, nbytes, req->buffer, rq_data_dir(req));

                if ( ! __blk_end_request_cur(req, 0) ) {
                        req = blk_fetch_request(q);
                }
        }
}
static void ramdisk_request(struct request_queue *q)
{
    struct request *req;

    req = blk_fetch_request(q);
    while (req) {
        struct ramdisk_dev *dev = req->rq_disk->private_data;
        if (req->cmd_type != REQ_TYPE_FS) {
            printk (KERN_NOTICE "Skip non-fs request\n");
            __blk_end_request_all(req, -EIO);
            req = blk_fetch_request(q);
            continue;
        }
        msg_dbg (KERN_NOTICE "Req dev %u dir %d sec %ld, nr %d\n",
            (unsigned int)dev, rq_data_dir(req),
                (long int)blk_rq_pos(req), blk_rq_cur_sectors(req));
        /* ramdisk_transfer() hands the I/O off over the message channel;
         * the request is expected to be completed when the reply arrives,
         * not here. */
        ramdisk_transfer(dev, req);
        req = blk_fetch_request(q);
    }
}