Beispiel #1
0
/*
 * Request-queue handler for the sbd ramdisk.
 *
 * Services requests one segment at a time.  req->buffer maps only the
 * *current* segment, so every copy must be bounded by
 * blk_rq_cur_bytes() — the original used blk_rq_bytes() (the size of
 * the whole request), overrunning the segment buffer.  The return
 * value of __blk_end_request_cur() tells us whether further segments
 * of the same request remain before a new request may be fetched.
 */
static void sbd_request_func(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req != NULL) {
		unsigned long pos = blk_rq_pos(req) << 9;	/* byte offset on disk */
		unsigned long len = blk_rq_cur_bytes(req);	/* bytes in current segment */
		int err = 0;

		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			err = -EIO;
			goto done;
		}

		/* Reject accesses that run past the end of the device. */
		if (pos + len > SBD_BYTES) {
			printk (KERN_INFO "out of disk boundary\n");
			err = -EIO;
			goto done;
		}

		printk (KERN_INFO "%s, rq_pos << 9 = %lu, rq_bytes = %lu\n",
			(rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
			(unsigned long)(blk_rq_pos(req) << 9),
			(unsigned long)blk_rq_bytes(req));

		/* Copy only the current segment: req->buffer does not cover
		 * the whole request. */
		if (rq_data_dir(req) == WRITE)
			memcpy(sbd_data + pos, req->buffer, len);
		else
			memcpy(req->buffer, sbd_data + pos, len);

done:
		/* Fetch a new request only when this one is fully completed;
		 * otherwise loop to process its next segment. */
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
Beispiel #2
0
/*
 * Kernel thread that drains the stheno request queue.
 *
 * Sleeps until woken (stheno_wakeup set by the queue's request_fn,
 * presumably — TODO confirm) or until kthread_should_stop().  Each
 * request is completed segment by segment with __blk_end_request_cur();
 * a true return means more segments remain, handled via the
 * next_segment label.  queue_lock is taken around every blk_* queue
 * call because this runs in thread context, outside the lock the block
 * core normally holds.
 *
 * Returns 0 when the thread is asked to terminate.
 */
static int stheno_request_thread( void *arg )
{
    struct request *req;
    int ret;

    while( 1 ){
        /* Wait for work; a signal (nonzero return) terminates the thread. */
        ret = wait_event_interruptible( stheno_wait_q, (kthread_should_stop() || stheno_wakeup == 1) );
        if( ret != 0 ) break;

        /* Consume the wakeup token before draining the queue. */
        stheno_wakeup = 0;

        if( kthread_should_stop() ) break;

        while( 1 ){
            spin_lock_irq( stheno_queue->queue_lock );
            req = blk_fetch_request( stheno_queue );
            spin_unlock_irq( stheno_queue->queue_lock );
        next_segment:
            if( req == NULL ) break;

            /* Fail non-filesystem requests segment by segment. */
            if( !blk_fs_request( req ) ){
                /*blk_end_request_cur( req, -EIO );*/
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            /* NOTE(review): sector 0 is re-read before every request —
             * presumably a media/partition validity probe; verify. */
            if( stheno_read_sector0() != 0 ){
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            /* Reject degenerate (zero-length) requests. */
            if( blk_rq_sectors( req ) == 0 || blk_rq_cur_sectors( req ) == 0 ){
                spin_lock_irq( stheno_queue->queue_lock );
                ret = __blk_end_request_cur( req, -EIO );
                spin_unlock_irq( stheno_queue->queue_lock );
                if( ret == true ) goto next_segment;
                continue;
            }
            /* rq_data_dir() == 0 means READ; transfer the current segment
             * at the device's LBA offset. */
            if( rq_data_dir( req ) == 0 ){
                ret = euryale_read_process( stheno_lbaoffset + blk_rq_pos( req ), blk_rq_cur_sectors( req ), req->buffer );
            }else{
                ret = euryale_write_process( stheno_lbaoffset + blk_rq_pos( req ), blk_rq_cur_sectors( req ), req->buffer );
            }
            /*blk_end_request_cur( req, ret == 0 ? 0 : -EIO );*/
            spin_lock_irq( stheno_queue->queue_lock );
            ret = __blk_end_request_cur( req, ret == 0 ? 0 : -EIO );
            spin_unlock_irq( stheno_queue->queue_lock );
            /* true: same request still has segments — process them. */
            if( ret == true ) goto next_segment;
        }
    }
    print_debug("stheno_request_thread was terminated.\n");
    return 0;
}
Beispiel #3
0
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;
	int ret;

	req = blk_fetch_request(q);
	while (req) {
		struct sbull_dev *dev = req->rq_disk->private_data;
		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			ret = -EIO;
			goto done;
		}
		printk (KERN_NOTICE "Req dev %u dir %d sec %ld, nr %d\n",
			(unsigned)(dev - Devices), rq_data_dir(req),
			blk_rq_pos(req), blk_rq_cur_sectors(req));
		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
				req->buffer, rq_data_dir(req));
		ret = 0;
	done:
		if(!__blk_end_request_cur(req, ret)){
			req = blk_fetch_request(q);
		}
	}
}
Beispiel #4
0
/*
 * Request-queue handler for the HTIF block device.
 *
 * Only one request is in flight at a time: when htifblk_segment()
 * accepts a request the queue is stopped and the completion interrupt
 * (htifblk_isr) restarts it.  Non-fs requests are failed with -EIO.
 *
 * NOTE(review): a request_fn is normally invoked with queue_lock
 * already held, yet this one takes it again with spin_lock_irqsave —
 * presumably it is also called from a context without the lock;
 * verify against the callers.
 */
static void htifblk_request(struct request_queue *q)
{
	struct htifblk_device *dev;
	struct request *req;
	unsigned long flags;
	int ret;

	dev = q->queuedata;
	spin_lock_irqsave(q->queue_lock, flags);
	/* A request is already in flight; its completion IRQ will restart us. */
	if (dev->req != NULL)
		goto out;

	while ((req = blk_fetch_request(q)) != NULL) {
		if (req->cmd_type == REQ_TYPE_FS) {
			ret = htifblk_segment(dev, req);
			if (unlikely(ret)) {
				/* WARN if the failed request had more segments. */
				WARN_ON(__blk_end_request_cur(req, ret));
				continue;
			}
			/* Segment submitted: stop the queue until the IRQ. */
			blk_stop_queue(q);
			break;
		} else {
			blk_dump_rq_flags(req, DRIVER_NAME
				": ignored non-fs request");
			__blk_end_request_all(req, -EIO);
			continue;
		}
	}
out:
	spin_unlock_irqrestore(q->queue_lock, flags);
}
Beispiel #5
0
/*
 * Completion interrupt for the HTIF block device.
 *
 * Matches the tag carried in the interrupt payload against the tag of
 * the in-flight request, completes that request (with -EIO on a tag
 * mismatch), and restarts the queue that htifblk_request() stopped.
 *
 * Returns IRQ_HANDLED when an in-flight request was completed,
 * IRQ_NONE when there was none (spurious interrupt).
 */
static irqreturn_t htifblk_isr(struct htif_device *dev, unsigned long data)
{
	struct htifblk_device *htifblk_dev;
	unsigned int tag;
	irqreturn_t ret;
	int err;

	htifblk_dev = dev_get_drvdata(&dev->dev);
	ret = IRQ_NONE;

	spin_lock(&htifblk_dev->lock);
	if (unlikely(htifblk_dev->req == NULL)) {
		dev_err(&dev->dev, "null request\n");
		goto out;
	}

	err = 0;
	/* Strip the device-number bits, leaving only the tag. */
	tag = (data << HTIF_DEV_SHIFT) >> HTIF_DEV_SHIFT;
	if (unlikely(tag != htifblk_dev->tag)) {
		dev_err(&dev->dev, "tag mismatch: expected=%u actual=%u\n",
			htifblk_dev->tag, tag);
		err = -EIO;
	}

	/* Ensure the device's DMA writes are visible before completion. */
	wmb();
	/* One segment per in-flight request: WARN if more remained. */
	WARN_ON(__blk_end_request_cur(htifblk_dev->req, err));
	htifblk_dev->req = NULL;
	blk_start_queue(htifblk_dev->disk->queue);
	ret = IRQ_HANDLED;
out:
	spin_unlock(&htifblk_dev->lock);
	return ret;
}
Beispiel #6
0
static void do_z2_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req) {
		unsigned long start = blk_rq_pos(req) << 9;
		unsigned long len  = blk_rq_cur_bytes(req);
		int err = 0;

		if (start + len > z2ram_size) {
			printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
				blk_rq_pos(req), blk_rq_cur_sectors(req));
			err = -EIO;
			goto done;
		}
		while (len) {
			unsigned long addr = start & Z2RAM_CHUNKMASK;
			unsigned long size = Z2RAM_CHUNKSIZE - addr;
			if (len < size)
				size = len;
			addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
			if (rq_data_dir(req) == READ)
				memcpy(req->buffer, (char *)addr, size);
			else
				memcpy((char *)addr, req->buffer, size);
			start += size;
			len -= size;
		}
	done:
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
Beispiel #7
0
/*
 * Request handler stub for the ramblock device: completes every
 * request successfully without transferring data.
 *
 * Bug fix: the original called __blk_end_request_cur() once and
 * ignored its return value, so multi-segment requests were fetched
 * again half-finished (the dead #if 0 block hinted at the problem).
 * Now every segment of the request is completed before fetching the
 * next request.
 */
static void do_ramblock_request (struct request_queue * q)
{
    static int cnt = 0;
	struct request *req;

    printk("do_ramblock_request: cnt = %d\n", ++cnt);

	while ((req = blk_fetch_request(q)) != NULL) {
        /* Complete all segments of this request with status 0. */
        while (__blk_end_request_cur(req, 0))
            ;
    }
}
Beispiel #8
0
static void do_ramblk_request(struct request_queue *q )
{
	struct request *req;
//	static volatile int r_cnt = 0;
//	static volatile int w_cnt = 0;
	//printk("ramblk_request_fn %d.\n",cnt++);
	req = blk_fetch_request(q);
	while (req) {
			unsigned long start = blk_rq_pos(req) << 9;
			unsigned long len  = blk_rq_cur_bytes(req);
//			printk("len=%d.\n",len);
		
			if (start + len > RAMBLK_SIZE) {
					printk("RAMBLK_SIZE< start+len");
					goto done;
				}
			
			if (rq_data_dir(req) == READ)
				memcpy(req->buffer, (char *)(start+ramblk_buf), len);
			else
				memcpy((char *)(start+ramblk_buf), req->buffer, len);
		
			done:
				if (!__blk_end_request_cur(req, 0))
					req = blk_fetch_request(q);
	}
	
}
void ramhd_req_func(struct request_queue* q) {
    struct request* req;
    RAMHD_DEV* pdev;
    char* pData;
    unsigned long addr, size, start;
    req = blk_fetch_request(q);

    while (req) {
        start = blk_rq_pos(req); // The sector cursor of the current request
        pdev = (RAMHD_DEV*)req->rq_disk->private_data;
        pData = pdev->data;
        addr = (unsigned long)pData + start * RAMHD_SECTOR_SIZE;
        size = blk_rq_cur_bytes(req);

        if (rq_data_dir(req) == READ) {
            memcpy(req->buffer, (char*)addr, size);
        } else {
            memcpy((char*)addr, req->buffer, size);
        }

        if (!__blk_end_request_cur(req, 0)) {
            req = blk_fetch_request(q);
        }
    }
}
Beispiel #10
0
/*
 * Request handler for the simp_blkdev ramdisk.
 *
 * Bug fix: the completion test read "if(!__blk_end_request_cur(req,err));"
 * — the stray semicolon made the following refetch unconditional, so a
 * request whose segments were not all finished was abandoned and a new
 * one fetched.  The refetch is now correctly guarded.
 */
static void simp_blkdev_do_request(struct request_queue *q)
{
   struct request *req;

   req = blk_fetch_request(q);
   while (req) {
       unsigned long start = blk_rq_pos(req) << 9;     /* byte offset */
       unsigned long len = blk_rq_cur_sectors(req) << 9; /* segment bytes */
       int err = 0;

       /* Reject accesses past the end of the device. */
       if (start + len > SIMP_BLKDEV_BYTES) {
           printk(KERN_ERR SIMP_BLKDEV_DISKNAME ":bad access:block=%lu,count=%u\n",blk_rq_pos(req), blk_rq_cur_sectors(req));
           err = -EIO;
           goto done;
       }

       if (rq_data_dir(req) == READ)
           memcpy(req->buffer,simp_blkdev_data+start,len);
       else
           memcpy(simp_blkdev_data+start,req->buffer,len);

done:
       /* Refetch only when the request is fully completed. */
       if (!__blk_end_request_cur(req, err))
           req = blk_fetch_request(q);
   }
}
Beispiel #11
0
/*
 * Completion interrupt for the HTIF block device (SBI-message variant).
 *
 * Compares the tag in the SBI message with the tag of the in-flight
 * request, completes that request (-EIO on mismatch), and restarts the
 * queue stopped by the request function.
 *
 * Returns IRQ_HANDLED when a request was completed, IRQ_NONE on a
 * spurious interrupt (no request in flight).
 */
static irqreturn_t htifblk_isr(struct htif_device *dev, sbi_device_message *msg)
{
	struct htifblk_device *htifblk_dev;
	irqreturn_t ret;
	int err;

	htifblk_dev = dev_get_drvdata(&dev->dev);
	ret = IRQ_NONE;

	spin_lock(&htifblk_dev->lock);
	if (unlikely(htifblk_dev->req == NULL)) {
		dev_err(&dev->dev, "null request\n");
		goto out;
	}

	err = 0;
	if (unlikely(msg->data != htifblk_dev->tag)) {
		dev_err(&dev->dev, "tag mismatch: expected=%u actual=%lu\n",
			htifblk_dev->tag, msg->data);
		err = -EIO;
	}

	/* Ensure the device's writes are visible before completing. */
	wmb();
	/* One segment per in-flight request: WARN if more remained. */
	WARN_ON(__blk_end_request_cur(htifblk_dev->req, err));
	htifblk_dev->req = NULL;
	blk_start_queue(htifblk_dev->disk->queue);
	ret = IRQ_HANDLED;
out:
	spin_unlock(&htifblk_dev->lock);
	return ret;
}
Beispiel #12
0
/*
 * Request handler for the mini2440 ramdisk.
 *
 * Bug fix: a true return from __blk_end_request_cur() is NOT an error —
 * it means the request has further segments, which the next loop
 * iteration processes (blk_rq_pos/blk_rq_cur_bytes advance after each
 * completion).  The original logged it as "__blk_end_request_cur error!"
 * and skipped the refetch; the misleading error report is removed.
 */
static void mini2440_ramdisk_request(struct request_queue *q)
{    
	static int r_cnt = 0;	/* read counter, for tracing only */
	static int w_cnt = 0;	/* write counter, for tracing only */
	struct request *req;
	
	req = blk_fetch_request(q);
	while (req) {
		/* Offset of the current segment within the ramdisk buffer. */
		unsigned long offset = blk_rq_pos(req) << 9;

		/* Length of the current segment in bytes. */
		unsigned long len = blk_rq_cur_bytes(req);

		if (rq_data_dir(req) == READ) {
			printk("[RAMDISK]ramdisk_request read %d\n", ++r_cnt);
			memcpy(req->buffer, mini2440_ramdisk_devp->ramdisk_buffer+offset, len);
		}else {
			printk("[RAMDISK]ramdisk_request write %d\n", ++w_cnt);
			memcpy(mini2440_ramdisk_devp->ramdisk_buffer+offset, req->buffer, len);
		}		
		
		/* false: request fully done — fetch the next one.
		 * true: more segments — loop again on the same request. */
		if (!__blk_end_request_cur(req, 0))
			req = blk_fetch_request(q);
	}
}
Beispiel #13
0
static void ramblock_do_request(struct request_queue * q)
{
	struct request *req;
	printk("do:ramblock_do_request\n");
	req = blk_fetch_request(q);
	while (req) {
		/*源或目的*/
		unsigned long offset = blk_rq_pos(req) * 512;

		/*目的或源*/
		//req->buffer
		
		/*长度*/
		unsigned long len  = blk_rq_cur_bytes(req);
		if (rq_data_dir(req) == READ)
			memcpy(req->buffer, ram_buff+offset, len);
		else
			memcpy(ram_buff+offset, req->buffer, len);

		/* wrap up, 0 = success, -errno = fail */
		if (!__blk_end_request_cur(req, 0))
			req = blk_fetch_request(q);
	}

}
Beispiel #14
0
/*
 * Request-queue handler for the HTIF block device (lock-free variant).
 *
 * Identical in structure to the locking variant: submit one request at
 * a time via htifblk_segment(), stop the queue until the completion
 * interrupt restarts it, and fail non-fs requests with -EIO.
 *
 * NOTE(review): no explicit locking here — presumably this relies on
 * the block core invoking the request_fn with queue_lock held; verify.
 */
static void htifblk_request(struct request_queue *q)
{
	struct htifblk_device *dev;
	struct request *req;
	int ret;

	dev = q->queuedata;
	/* A request is already in flight; wait for its completion IRQ. */
	if (dev->req != NULL)
		return;

	while ((req = blk_fetch_request(q)) != NULL) {
		if (req->cmd_type == REQ_TYPE_FS) {
			ret = htifblk_segment(dev, req);
			if (unlikely(ret)) {
				/* WARN if the failed request had more segments. */
				WARN_ON(__blk_end_request_cur(req, ret));
				continue;
			}
			/* Segment submitted: stop the queue until the IRQ. */
			blk_stop_queue(q);
			break;
		} else {
			blk_dump_rq_flags(req, DRIVER_NAME
				": ignored non-fs request");
			__blk_end_request_all(req, -EIO);
			continue;
		}
	}
}
Beispiel #15
0
static void sbull_request(struct request_queue*q)
{
	struct request *req;

	while((req = blk_fetch_request(q)) != NULL)
	{
		struct sbull_dev *dev = req->rq_disk->private_data;
		if(!blk_fs_request(req))
		{
			printk(KERN_NOTICE " Skip non-fs request\n");
			__blk_end_request_cur(req, 0);
			continue;
		}
		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));
		__blk_end_request_cur(req, 1);
	}
}
Beispiel #16
0
/*
 * The simple form of the request function: fail non-fs requests with
 * -EIO, otherwise hand the current segment to sbull_transfer().
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req = blk_fetch_request(q);

	while (req != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;

		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			__blk_end_request_cur(req, -EIO);
		} else {
			sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
					req->buffer, rq_data_dir(req));
			__blk_end_request_cur(req, 0);
		}
		req = blk_fetch_request(q);
	}
}
Beispiel #17
0
/*
 * Request-queue handler for the CompactFlash device.
 *
 * Each segment is transferred under cf->rw_sem so that only one R/W is
 * in flight at a time.  err starts at -EIO so that non-fs and
 * out-of-range requests fall straight through to the completion path
 * as failures.
 *
 * NOTE(review): if down_interruptible() is interrupted, the loop breaks
 * with the fetched request neither transferred nor ended — presumably
 * it is re-driven later, but this looks like it could strand a
 * started request; verify.
 */
static void cf_request(struct request_queue *q)
{
	struct cf_device *cf;
	struct request *req;
	unsigned block, count;
	int rw, err;

	DPRINTK(DEBUG_CF_REQUEST, "%s: q %p", __FUNCTION__, q);

	req = blk_fetch_request(q);
	while (req) {
		/* Default to failure; only a successful transfer clears it. */
		err = -EIO;
		DPRINTK(DEBUG_CF_REQUEST, "%s:%d req %p", __FUNCTION__, __LINE__, req);

		if (!blk_fs_request(req))
			goto done;

		

		block = blk_rq_pos(req);
		count = blk_rq_cur_sectors(req);
		rw = rq_data_dir(req);
		cf = req->rq_disk->private_data;

		DPRINTK(DEBUG_CF_REQUEST, "req %p block %d count %d rw %c\n", req, block, count, (rw == READ)?'R':'W');

		/* Reject segments that run past the device capacity. */
		if (block+count > get_capacity(req->rq_disk)) {
			printk("%s: %u+%u is larger than %llu\n", __FUNCTION__, block, count, get_capacity(req->rq_disk));
			goto done;
		}

		/* Grab the R/W semaphore to prevent more than
		 * one request from trying to R/W at the same time */
		err = down_interruptible(&cf->rw_sem);
		if (err)
			break;

		if (rw == READ)
			err = cf_read_sectors(cf, req->buffer, block, count);
		else
			err = cf_write_sectors(cf, req->buffer, block, count);
		up(&cf->rw_sem);

	done:
		DPRINTK(DEBUG_CF_REQUEST, "%s: blk_end_request_cur(%p, %d)\n", __FUNCTION__, req, err);
		/* Refetch only once every segment of this request is done. */
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
	DPRINTK(DEBUG_CF_REQUEST, "end\n");
	cf_in_request--;
}
Beispiel #18
0
/*
 * Select requests from the given queue (q) and hand each one to
 * sbd_transfer().  A request may consist of several segments; each is
 * processed in turn until the whole request has been completed.
 *
 * Bug fix: when a non-fs request was ended with __blk_end_request_all()
 * the original executed "continue" without fetching a new request, so
 * the loop spun forever on the already-completed request.  The next
 * request is now fetched before continuing.  The redundant
 * "req == NULL" test inside the loop (the while condition already
 * guarantees req != NULL) is also gone.
 */
static void sbd_request(struct request_queue *q) {
	struct request *req;

	req = blk_fetch_request(q);
	while (req != NULL) {
		/* Only filesystem (REQ_TYPE_FS) requests carry data we handle. */
		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-CMD request\n");
			__blk_end_request_all(req, -EIO);
			req = blk_fetch_request(q);
			continue;
		}
		sbd_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
				req->buffer, rq_data_dir(req));
		/* false: request fully completed — fetch the next one. */
		if ( ! __blk_end_request_cur(req, 0) ) {
			req = blk_fetch_request(q);
		}
	}
}
Beispiel #19
0
Datei: vbd.c Projekt: truncs/vbd
/*
 * Service each request in the queue. If the request
 * is not a REQ_TYPE_FS type then just skip the request
 * notifying that it is skipping this request.
 */
/*
 * Service each request in the queue via vbd_tx().  Non-REQ_TYPE_FS
 * requests are skipped with -EIO.
 *
 * Bug fix: the skip path ended the request and then executed
 * "continue" without fetching a new one, so the loop spun forever on
 * the completed request.  It now fetches the next request first.
 */
static void vbd_request(struct request_queue * q) {
  struct request *req;
  req = blk_fetch_request(q);

  while(req != NULL) {

	if(req->cmd_type != REQ_TYPE_FS) {
	  printk(KERN_NOTICE "Skip non fs type request\n");
	  __blk_end_request_all(req, -EIO);
	  req = blk_fetch_request(q);
	  continue;
	}

	vbd_tx(&device,blk_rq_pos(req), blk_rq_cur_sectors(req),
		   req->buffer, rq_data_dir(req));
	/* false: request fully completed — fetch the next one. */
	if(!__blk_end_request_cur(req, 0))
	  req = blk_fetch_request(q);
  }
}
Beispiel #20
0
static void my_request(struct request_queue *q)
{
	struct request *rq;
	int size, res = 0;
	char *ptr;
	unsigned nr_sectors, sector;
	printk(KERN_INFO "entering request routine\n");

	rq = blk_fetch_request(q);
	while (rq) {
		if (!blk_fs_request(rq)) {
			printk(KERN_WARNING
			       "This was not a normal fs request, skipping\n");
			goto done;
		}
		nr_sectors = blk_rq_cur_sectors(rq);
		sector = blk_rq_pos(rq);

		ptr = ramdisk + sector * sector_size;
		size = nr_sectors * sector_size;

		if ((ptr + size) > (ramdisk + disk_size)) {
			printk(KERN_WARNING
			       " tried to go past end of device\n");
			goto done;
		}

		if (rq_data_dir(rq)) {
			printk(KERN_INFO "writing at sector %d, %u sectors \n",
			       sector, nr_sectors);
			memcpy(ptr, rq->buffer, size);
		} else {
			printk(KERN_INFO "reading at sector %d, %u sectors \n",
			       sector, nr_sectors);
			memcpy(rq->buffer, ptr, size);
		}
	      done:
		if (!__blk_end_request_cur(rq, res))
			rq = blk_fetch_request(q);
	}
	printk(KERN_INFO "leaving request\n");
}
Beispiel #21
0
/*
 * Request handler for the looper device: hands each segment to
 * looper_transfer(), failing non-fs requests with -EIO.
 *
 * Bug fix: the skip path ended the request with __blk_end_request_all()
 * and then hit "continue" without fetching a new request, spinning
 * forever on the completed request.  It now fetches the next request
 * before continuing; the redundant "req == NULL" test is removed.
 */
static void looper_request(struct request_queue *q) {
 
  struct request *req;
 
  printk(KERN_INFO "looper: executing request");
   
  req = blk_fetch_request(q);
  while (req != NULL) {
    if (req->cmd_type != REQ_TYPE_FS) {
      printk (KERN_NOTICE "Skip non-CMD request\n");
      __blk_end_request_all(req, -EIO);
      req = blk_fetch_request(q);
      continue;
    }
    looper_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
		 req->buffer, rq_data_dir(req));
    /* false: request fully completed — fetch the next one. */
    if ( ! __blk_end_request_cur(req, 0) ) {
      req = blk_fetch_request(q);
    }
  }
}
Beispiel #22
0
/*
 * Simply used for requesting a transfer (read or write) of data from
 * the RAM disk.
 *
 * Bug fixes: (1) the osurd_transfer() call was terminated with a colon
 * instead of a semicolon — a syntax error; (2) the non-fs skip path
 * executed "continue" after __blk_end_request_all() without fetching a
 * new request, spinning forever on the completed request.
 */
static void osurd_request(struct request_queue *q)
{
    struct request *req;
    req = blk_fetch_request(q);

    while(req != NULL) {
        struct osurd_dev *dev = req->rq_disk->private_data;
        if(req->cmd_type != REQ_TYPE_FS) {
            printk(KERN_NOTICE "Skip non-fs request\n");
            __blk_end_request_all(req, -EIO);
            req = blk_fetch_request(q);
            continue;
        }
        osurd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
                       req->buffer, rq_data_dir(req));
        /* false: request fully completed — fetch the next one. */
        if(!__blk_end_request_cur(req, 0)) {
            req = blk_fetch_request(q);
        }
    }
}
Beispiel #23
0
Datei: sbd.c Projekt: OSLL/ioperf
/*
 * Request handler for the sbd device.
 *
 * Bug fix: after ending a non-fs request with __blk_end_request_all()
 * the original hit "continue" without fetching a new request, so the
 * loop spun forever on the completed request.  The next request is now
 * fetched first; the redundant "req == NULL" check is removed.
 */
static void sbd_request(struct request_queue *q) {
    struct request *req;

    req = blk_fetch_request(q);
    while (req != NULL) {
        // blk_fs_request() was removed in 2.6.36 - many thanks to
        // Christian Paro for the heads up and fix...
        if (req->cmd_type != REQ_TYPE_FS) {
            printk (KERN_NOTICE "Skip non-CMD request\n");
            __blk_end_request_all(req, -EIO);
            req = blk_fetch_request(q);
            continue;
        }
        sbd_transfer(&Device, blk_rq_pos(req), blk_rq_cur_sectors(req),
                req->buffer, rq_data_dir(req));
        /* false: request fully completed — fetch the next one. */
        if ( ! __blk_end_request_cur(req, 0) ) {
            req = blk_fetch_request(q);
        }
    }
}
Beispiel #24
0
/*
 * Request handler for the sb device: transfers each segment via
 * sb_transfer() and completes it with -EIO on transfer failure.
 *
 * Bug fix: the non-fs skip path executed "continue" after
 * __blk_end_request_all() without fetching a new request, spinning
 * forever on the completed request; the redundant "req == NULL" test
 * is also removed.
 */
static void sb_request(struct request_queue *q) {
	struct request *req;
	int error;

	req = blk_fetch_request(q);
	while (req != NULL) {
		/* Check request type */
		if (req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(req, -EIO);
			req = blk_fetch_request(q);
			continue;
		}
		/* Do transfer */
		error = sb_transfer(sbd, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));
		/* false: request fully completed — fetch the next one. */
		if (!__blk_end_request_cur(req, error ? -EIO : 0) ) {
			req = blk_fetch_request(q);
		}
	}

	return;
}
Beispiel #25
0
/*
 * Request handler for the sbd device: converts the segment position
 * and size to bytes and hands them to operar_sector().
 *
 * Bug fix: the non-fs skip path executed "continue" after
 * __blk_end_request_all() without fetching a new request, so the loop
 * spun forever on the completed request; the redundant "req == NULL"
 * test is also removed.
 */
static void sbd_request(struct request_queue *q) {
        struct request *req;
        unsigned long offset;
        unsigned long nbytes;

        req = blk_fetch_request(q);
        while (req != NULL) {
                if (req->cmd_type != REQ_TYPE_FS) {
                        printk (KERN_NOTICE "Skip non-CMD request\n");
                        __blk_end_request_all(req, -EIO);
                        req = blk_fetch_request(q);
                        continue;
                }
		/* Convert sector position/count to byte offset/length. */
		offset = blk_rq_pos(req) * logical_block_size;
		nbytes = blk_rq_cur_sectors(req) * logical_block_size;

		operar_sector(&Device, offset, nbytes, req->buffer, rq_data_dir(req));

                /* false: request fully completed — fetch the next one. */
                if ( ! __blk_end_request_cur(req, 0) ) {
                        req = blk_fetch_request(q);
                }
        }
}
Beispiel #26
0
Datei: test.c Projekt: sktwj/var
//从请求队列上获取请求操作对象,从请求对象中获得操作参数:读写操作的起始sector和操作字节数,然后将所需的操作执行到硬件上去
//本函数是由blk驱动框架来自动调用的,调用时机由电梯算法调度决定
/*
 * Request handler for the ldm ramdisk, invoked by the block layer
 * (scheduling decided by the elevator).  For each segment it reads the
 * start sector and byte count from the request and copies the data
 * between req->buffer and the device memory at ldm.addr.
 */
static void do_ldm_req(struct request_queue *q)
{
	struct request *req = blk_fetch_request(q);

	while (req) {
		/* Byte offset of the first sector to operate on. */
		u32 begin = blk_rq_pos(req) * SECTOR_SIZE;
		/* Number of bytes in the current segment. */
		u32 nbytes = blk_rq_cur_bytes(req);

		/* Reject segments that run past the device capacity. */
		int err = 0;
		if (begin + nbytes > DEV_SIZE) {
			printk(KERN_ERR "request region is out of device capacity\n");
			err = -EIO;
			goto err_request;
		}

		/* rq_data_dir() gives the direction of the request:
		 * READ copies device -> buffer, otherwise buffer -> device. */
		if (rq_data_dir(req) == READ) {
			memcpy(req->buffer, (u8*)ldm.addr + begin, nbytes);
			printk("read from %d, size %d\n", begin, nbytes);
		} else {
			memcpy((u8*)ldm.addr + begin, req->buffer, nbytes);
			printk("write from %d, size %d\n", begin, nbytes);
		}

		/* __blk_end_request_cur() returns false when the request is
		 * fully completed (then fetch a new one; NULL exits the loop)
		 * and true while segments remain; err < 0 also ends the
		 * current request so the next can be fetched. */
err_request:
		if (!__blk_end_request_cur(req, err)) {
			req = blk_fetch_request(q);
		}
	}
}
Beispiel #27
0
/*
 * Service z2ram requests (blk_status_t API): a segment may span several
 * physical chunks, so the copy walks chunk by chunk through z2ram_map.
 */
static void do_z2_request(struct request_queue *q)
{
	struct request *req = blk_fetch_request(q);

	while (req) {
		unsigned long offset = blk_rq_pos(req) << 9;
		unsigned long remaining = blk_rq_cur_bytes(req);
		blk_status_t status = BLK_STS_OK;

		if (offset + remaining > z2ram_size) {
			pr_err(DEVICE_NAME ": bad access: block=%llu, "
			       "count=%u\n",
			       (unsigned long long)blk_rq_pos(req),
			       blk_rq_cur_sectors(req));
			status = BLK_STS_IOERR;
		} else {
			while (remaining) {
				unsigned long chunk_off = offset & Z2RAM_CHUNKMASK;
				unsigned long chunk_len = Z2RAM_CHUNKSIZE - chunk_off;
				void *buffer = bio_data(req->bio);
				unsigned long src;

				if (chunk_len > remaining)
					chunk_len = remaining;
				src = chunk_off + z2ram_map[ offset >> Z2RAM_CHUNKSHIFT ];
				if (rq_data_dir(req) == READ)
					memcpy(buffer, (char *)src, chunk_len);
				else
					memcpy((char *)src, buffer, chunk_len);
				offset += chunk_len;
				remaining -= chunk_len;
			}
		}
		/* Refetch only when the whole request has been completed. */
		if (!__blk_end_request_cur(req, status))
			req = blk_fetch_request(q);
	}
}
Beispiel #28
0
/*
 * Request handler for the HTIF block device: hands each segment to
 * htifbd_transfer(), failing non-fs requests with -EIO.
 *
 * Bug fix: the non-fs skip path executed "continue" after ending the
 * request with __blk_end_request_all() without fetching a new request,
 * so the loop spun forever on the completed request.
 */
static void htifbd_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req != NULL) {
		struct htifbd_dev *dev;

		dev = req->rq_disk->private_data;
		if (req->cmd_type != REQ_TYPE_FS) {
			pr_notice(DRIVER_NAME ": ignoring non-fs request for %s\n",
				req->rq_disk->disk_name);
			__blk_end_request_all(req, -EIO);
			req = blk_fetch_request(q);
			continue;
		}

		htifbd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
			req->buffer, rq_data_dir(req));
		/* false: request fully completed — fetch the next one. */
		if (!__blk_end_request_cur(req, 0)) {
			req = blk_fetch_request(q);
		}
	}
}
Beispiel #29
0
/*
 * One step of the Xilinx SystemACE CompactFlash state machine.
 *
 * Called repeatedly (from tasklet/IRQ context — TODO confirm caller) to
 * advance ace->fsm_state: acquire the MPU lock, wait for the CF to be
 * ready, then either run an IDENTIFY command or transfer the data of
 * the request fetched from ace->queue.  ace_fsm_yield() re-polls,
 * ace_fsm_yieldirq() waits for the device interrupt.
 */
static void ace_fsm_dostate(struct ace_device *ace)
{
	struct request *req;
	u32 status;
	u16 val;
	int count;

#if defined(DEBUG)
	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
		ace->fsm_state, ace->id_req_count);
#endif

	/* Verify that there is actually a CF in the slot. If not, then
	 * bail out back to the idle state and wake up all the waiters */
	status = ace_in32(ace, ACE_STATUS);
	if ((status & ACE_STATUS_CFDETECT) == 0) {
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->media_change = 1;
		set_capacity(ace->gd, 0);
		dev_info(ace->dev, "No CF in slot\n");

		/* Drop all in-flight and pending requests */
		if (ace->req) {
			__blk_end_request_all(ace->req, -EIO);
			ace->req = NULL;
		}
		while ((req = blk_fetch_request(ace->queue)) != NULL)
			__blk_end_request_all(req, -EIO);

		/* Drop back to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = -EIO;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
	}

	switch (ace->fsm_state) {
	case ACE_FSM_STATE_IDLE:
		/* See if there is anything to do */
		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
			ace->fsm_iter_num++;
			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
			mod_timer(&ace->stall_timer, jiffies + HZ);
			if (!timer_pending(&ace->stall_timer))
				add_timer(&ace->stall_timer);
			break;
		}
		/* Nothing pending: stop the stall watchdog and go quiescent. */
		del_timer(&ace->stall_timer);
		ace->fsm_continue_flag = 0;
		break;

	case ACE_FSM_STATE_REQ_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Already have the lock, jump to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Request the lock */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
		break;

	case ACE_FSM_STATE_WAIT_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* got the lock; move to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* wait a bit for the lock */
		ace_fsm_yield(ace);
		break;

	case ACE_FSM_STATE_WAIT_CFREADY:
		status = ace_in32(ace, ACE_STATUS);
		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
		    (status & ACE_STATUS_CFBSY)) {
			/* CF card isn't ready; it needs to be polled */
			ace_fsm_yield(ace);
			break;
		}

		/* Device is ready for command; determine what to do next */
		if (ace->id_req_count)
			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
		else
			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
		break;

	case ACE_FSM_STATE_IDENTIFY_PREPARE:
		/* Send identify command */
		ace->fsm_task = ACE_TASK_IDENTIFY;
		ace->data_ptr = ace->cf_id;
		ace->data_count = ACE_BUF_PER_SECTOR;
		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* irq handler takes over from this point; wait for the
		 * transfer to complete */
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
		ace_fsm_yieldirq(ace);
		break;

	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->data_count);
			ace_fsm_yield(ace);
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			ace_fsm_yield(ace);
			break;
		}

		/* Transfer the next buffer */
		ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transfers; jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* transfer finished; kick state machine */
		dev_dbg(ace->dev, "identify finished\n");
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
		break;

	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
		ace_fix_driveid(ace->cf_id);
		ace_dump_mem(ace->cf_id, 512);	/* Debug: Dump out disk ID */

		if (ace->data_result) {
			/* Error occured, disable the disk */
			ace->media_change = 1;
			set_capacity(ace->gd, 0);
			dev_err(ace->dev, "error fetching CF id (%i)\n",
				ace->data_result);
		} else {
			ace->media_change = 0;

			/* Record disk parameters */
			set_capacity(ace->gd,
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
			dev_info(ace->dev, "capacity: %i sectors\n",
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
		}

		/* We're done, drop to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = ace->data_result;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
		break;

	case ACE_FSM_STATE_REQ_PREPARE:
		req = ace_get_next_request(ace->queue);
		if (!req) {
			ace->fsm_state = ACE_FSM_STATE_IDLE;
			break;
		}
		blk_start_request(req);

		/* Okay, it's a data request, set it up for transfer */
		dev_dbg(ace->dev,
			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
			(unsigned long long)blk_rq_pos(req),
			blk_rq_sectors(req), blk_rq_cur_sectors(req),
			rq_data_dir(req));

		ace->req = req;
		ace->data_ptr = req->buffer;
		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);

		count = blk_rq_sectors(req);
		if (rq_data_dir(req)) {
			/* Kick off write request */
			dev_dbg(ace->dev, "write data\n");
			ace->fsm_task = ACE_TASK_WRITE;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_WRITE_DATA);
		} else {
			/* Kick off read request */
			dev_dbg(ace->dev, "read data\n");
			ace->fsm_task = ACE_TASK_READ;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_READ_DATA);
		}

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* Move to the transfer state.  The systemace will raise
		 * an interrupt once there is something to do
		 */
		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
		if (ace->fsm_task == ACE_TASK_READ)
			ace_fsm_yieldirq(ace);	/* wait for data ready */
		break;

	case ACE_FSM_STATE_REQ_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev,
				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			dev_dbg(ace->dev,
				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer the next buffer */
		if (ace->fsm_task == ACE_TASK_WRITE)
			ace->reg_ops->dataout(ace);
		else
			ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transfers; jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* bio finished; is there another one? */
		if (__blk_end_request_cur(ace->req, 0)) {
			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
			 *      blk_rq_sectors(ace->req),
			 *      blk_rq_cur_sectors(ace->req));
			 */
			ace->data_ptr = ace->req->buffer;
			ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
			ace_fsm_yieldirq(ace);
			break;
		}

		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
		break;

	case ACE_FSM_STATE_REQ_COMPLETE:
		ace->req = NULL;

		/* Finished request; go to idle state */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;

	default:
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;
	}
}
/*
 * Execute one step of the System ACE finite state machine.
 *
 * Called from the FSM tasklet/irq path; advances ace->fsm_state by one
 * transition per invocation.  The helpers ace_fsm_yield() (re-poll soon)
 * and ace_fsm_yieldirq() (sleep until the device raises an interrupt)
 * suspend the machine without blocking.
 */
static void ace_fsm_dostate(struct ace_device *ace)
{
	struct request *req;
	u32 status;
	u16 val;
	int count;

#if defined(DEBUG)
	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
		ace->fsm_state, ace->id_req_count);
#endif

	/* Verify that there is actually a CF card in the slot; if not, fail
	 * the in-flight request, drain the queue, and wake any waiters so
	 * nothing is left blocked on a device that is gone. */
	status = ace_in32(ace, ACE_STATUS);
	if ((status & ACE_STATUS_CFDETECT) == 0) {
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->media_change = 1;
		set_capacity(ace->gd, 0);
		dev_info(ace->dev, "No CF in slot\n");

		/* Drop all in-flight and pending requests */
		if (ace->req) {
			__blk_end_request_all(ace->req, -EIO);
			ace->req = NULL;
		}
		while ((req = blk_fetch_request(ace->queue)) != NULL)
			__blk_end_request_all(req, -EIO);

		/* Drop back to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = -EIO;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
	}

	switch (ace->fsm_state) {
	case ACE_FSM_STATE_IDLE:
		/* See if there is anything to do */
		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
			ace->fsm_iter_num++;
			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
			mod_timer(&ace->stall_timer, jiffies + HZ);
			if (!timer_pending(&ace->stall_timer))
				add_timer(&ace->stall_timer);
			break;
		}
		/* Nothing to do; stop the stall timer and park the FSM */
		del_timer(&ace->stall_timer);
		ace->fsm_continue_flag = 0;
		break;

	case ACE_FSM_STATE_REQ_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Already have the lock, jump to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Request the lock */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
		break;

	case ACE_FSM_STATE_WAIT_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Got the lock; move to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Wait a bit for the lock */
		ace_fsm_yield(ace);
		break;

	case ACE_FSM_STATE_WAIT_CFREADY:
		status = ace_in32(ace, ACE_STATUS);
		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
		    (status & ACE_STATUS_CFBSY)) {
			/* CF card isn't ready; it needs to be polled */
			ace_fsm_yield(ace);
			break;
		}

		/* Device is ready for command; determine what to do next */
		if (ace->id_req_count)
			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
		else
			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
		break;

	case ACE_FSM_STATE_IDENTIFY_PREPARE:
		/* Send identify command */
		ace->fsm_task = ACE_TASK_IDENTIFY;
		ace->data_ptr = ace->cf_id;
		ace->data_count = ACE_BUF_PER_SECTOR;
		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* irq handler takes over from this point; wait for the
		 * transfer to complete */
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
		ace_fsm_yieldirq(ace);
		break;

	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->data_count);
			ace_fsm_yield(ace);
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			ace_fsm_yield(ace);
			break;
		}

		/* Transfer the next buffer */
		ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred; jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer finished; kick state machine */
		dev_dbg(ace->dev, "identify finished\n");
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
		break;

	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
		ace_fix_driveid(ace->cf_id);
		ace_dump_mem(ace->cf_id, 512);	/* Debug: dump out disk ID */

		if (ace->data_result) {
			/* Error occurred, disable the disk */
			ace->media_change = 1;
			set_capacity(ace->gd, 0);
			dev_err(ace->dev, "error fetching CF id (%i)\n",
				ace->data_result);
		} else {
			ace->media_change = 0;

			/* Record disk capacity */
			set_capacity(ace->gd,
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
			dev_info(ace->dev, "capacity: %i sectors\n",
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
		}

		/* We're done; drop to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = ace->data_result;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
		break;

	case ACE_FSM_STATE_REQ_PREPARE:
		req = ace_get_next_request(ace->queue);
		if (!req) {
			ace->fsm_state = ACE_FSM_STATE_IDLE;
			break;
		}
		blk_start_request(req);

		/* Okay, it's a data request, set it up for transfer */
		dev_dbg(ace->dev,
			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
			(unsigned long long)blk_rq_pos(req),
			blk_rq_sectors(req), blk_rq_cur_sectors(req),
			rq_data_dir(req));

		ace->req = req;
		ace->data_ptr = req->buffer;
		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);

		count = blk_rq_sectors(req);
		if (rq_data_dir(req)) {
			/* Kick off write request */
			dev_dbg(ace->dev, "write data\n");
			ace->fsm_task = ACE_TASK_WRITE;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_WRITE_DATA);
		} else {
			/* Kick off read request */
			dev_dbg(ace->dev, "read data\n");
			ace->fsm_task = ACE_TASK_READ;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_READ_DATA);
		}

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* Move to the transfer state.  The systemace will raise
		 * an interrupt once there is something to do
		 */
		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
		if (ace->fsm_task == ACE_TASK_READ)
			ace_fsm_yieldirq(ace);	/* wait for data ready */
		break;

	case ACE_FSM_STATE_REQ_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev,
				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			dev_dbg(ace->dev,
				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer the next buffer */
		if (ace->fsm_task == ACE_TASK_WRITE)
			ace->reg_ops->dataout(ace);
		else
			ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred; jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* bio finished; is there another one? */
		if (__blk_end_request_cur(ace->req, 0)) {
			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
			 *      blk_rq_sectors(ace->req),
			 *      blk_rq_cur_sectors(ace->req));
			 */
			ace->data_ptr = ace->req->buffer;
			ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
			ace_fsm_yieldirq(ace);
			break;
		}

		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
		break;

	case ACE_FSM_STATE_REQ_COMPLETE:
		ace->req = NULL;

		/* Finished request; go to idle state */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;

	default:
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;
	}
}