Example 1
static void simpleblkdrv_do_request(struct request_queue *q)
{
	struct request *req;
	while ((req = elv_next_request(q)) != NULL)
	{
		if ((req->sector + req->current_nr_sectors) << 9 > BLK_BYTES)
		{
			printk(KERN_ERR"request error!\n");
			end_request(req, 0);/*transfer fail*/
			continue;
		}

		switch (rq_data_dir(req))
		{
			case READ:
				memcpy(req->buffer, blkdev_data + (req->sector << 9), req->current_nr_sectors << 9);
				end_request(req, 1);/*transfer ok*/
				break;
			case WRITE:
				memcpy(blkdev_data + (req->sector << 9), req->buffer, req->current_nr_sectors << 9);
				end_request(req, 1);/*transfer ok*/
				break;
			default:
				/* No default because rq_data_dir(req) is 1 bit */
				break;
		}
	}
}
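All of these examples target the legacy request-based block API (pre-2.6.31), where the driver's request function is registered with blk_init_queue() and invoked with the queue lock held. As a reminder of how a handler such as simpleblkdrv_do_request() above gets wired up, here is a minimal, hedged sketch; the identifiers simpleblkdrv_lock, simpleblkdrv_gd, simpleblkdrv_major, simpleblkdrv_fops and the "simpleblk" disk name are placeholders, not part of the original example.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(simpleblkdrv_lock);	/* lock handed to blk_init_queue() */
static struct request_queue *simpleblkdrv_queue;
static struct gendisk *simpleblkdrv_gd;
static int simpleblkdrv_major;			/* assumed: obtained from register_blkdev() */

static struct block_device_operations simpleblkdrv_fops = {
	.owner = THIS_MODULE,
};

static int simpleblkdrv_setup(void)
{
	/* The request function is called with simpleblkdrv_lock held. */
	simpleblkdrv_queue = blk_init_queue(simpleblkdrv_do_request, &simpleblkdrv_lock);
	if (!simpleblkdrv_queue)
		return -ENOMEM;

	simpleblkdrv_gd = alloc_disk(1);
	if (!simpleblkdrv_gd) {
		blk_cleanup_queue(simpleblkdrv_queue);
		return -ENOMEM;
	}

	simpleblkdrv_gd->major = simpleblkdrv_major;
	simpleblkdrv_gd->first_minor = 0;
	simpleblkdrv_gd->fops = &simpleblkdrv_fops;
	simpleblkdrv_gd->queue = simpleblkdrv_queue;
	snprintf(simpleblkdrv_gd->disk_name, sizeof(simpleblkdrv_gd->disk_name), "simpleblk");
	set_capacity(simpleblkdrv_gd, BLK_BYTES >> 9);	/* capacity is counted in 512-byte sectors */
	add_disk(simpleblkdrv_gd);

	return 0;
}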
Example 2
static ssize_t
omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
{
    unsigned long flags;
    struct request *rq;
    mbox_msg_t *p = (mbox_msg_t *) buf;
    struct omap_mbox *mbox = dev_get_drvdata(dev);
    struct request_queue *q = mbox->rxq->queue;

    while (1) {
        spin_lock_irqsave(q->queue_lock, flags);
        rq = elv_next_request(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        if (!rq)
            break;

        *p = (mbox_msg_t) rq->data;

        if (blk_end_request(rq, 0, 0))
            BUG();

        if (unlikely(mbox_seq_test(mbox, *p))) {
            pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
            continue;
        }
        p++;
    }

    pr_debug("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);

    return (size_t) ((char *)p - buf);
}
Example 3
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
#ifndef CONFIG_ARCH_EMXX
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
#endif
		while ((req = elv_next_request(q)) != NULL) {
#ifdef CONFIG_ARCH_EMXX
			req->cmd_flags |= REQ_QUIET;
#endif
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
Example 4
static void do_mbd_request(request_queue_t * q)
{
	int result = 0;
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		int minor = req->rq_disk->first_minor;

		switch (rq_data_dir(req)) {
		case READ:
			result = MamboBogusDiskRead(minor,
						    req->buffer, req->sector,
						    req->current_nr_sectors);
			break;
		case WRITE:
			result = MamboBogusDiskWrite(minor,
						     req->buffer, req->sector,
						     req->current_nr_sectors);
			break;
		}

		if (result)
			end_request(req, 0);	/* failure */
		else
			end_request(req, 1);	/* success */
	}
}
Example 5
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC | PF_NOFREEZE;

	daemonize("%sd", tr->name);

	/* daemonize() doesn't do this for us since some kernel threads
	   actually want to deal with signals. We can't just call
	   exit_sighand() since that'll cause an oops when we finally
	   do exit. */
	spin_lock_irq(&current->sighand->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	spin_lock_irq(rq->queue_lock);

	while (!tr->blkcore_priv->exiting) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;
		DECLARE_WAITQUEUE(wait, current);

		req = elv_next_request(rq);

		if (!req) {
			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			spin_unlock_irq(rq->queue_lock);

			schedule();
			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

			spin_lock_irq(rq->queue_lock);

			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		down(&dev->sem);
		res = do_blktrans_request(tr, dev, req);
		up(&dev->sem);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}
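Example 5 delegates the actual data movement to do_blktrans_request(), which is not shown in the excerpt. The sketch below illustrates what that helper does in the mtd_blkdevs code of the same era: it dispatches to the transport's readsect()/writesect() callbacks one 512-byte sector at a time, returning 1 on success and 0 on failure to match the end_request(req, res) call above. Per-transport block-size handling and other details are omitted, so treat this as an illustration rather than the exact in-tree code.

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block = req->sector;
	unsigned long nsect = req->current_nr_sectors;
	char *buf = req->buffer;

	if (!blk_fs_request(req))
		return 0;			/* only filesystem requests are handled */

	if (block + nsect > get_capacity(req->rq_disk))
		return 0;			/* request runs past the end of the device */

	switch (rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += 512)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;
	case WRITE:
		if (!tr->writesect)
			return 0;		/* read-only transport */
		for (; nsect > 0; nsect--, block++, buf += 512)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;
	default:
		return 0;
	}
}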
Example 6
static void do_ramdisk_request (request_queue_t * q)
{
	static int r_cnt = 0;
	static int w_cnt = 0;
	struct request *req;

//	printk("do_ramdisk_request = %d \n",++cnt);
	while ((req = elv_next_request(q)) != NULL)
	{
		/* Three elements of a data transfer: source, destination, length */
		/* source/destination */
		unsigned long offset = req->sector * 512;

		/* destination/source */
		//req->buffer
		/* length */
		unsigned long len  = req->current_nr_sectors * 512;

		if (rq_data_dir(req) == READ)
		{
			printk("do_ramdisk_request read = %d \n",++r_cnt);
			memcpy(req->buffer, ramblock_buf+offset, len);
			
		}
		else
		{
			printk("do_ramdisk_request wirte = %d \n",++w_cnt);
			memcpy(ramblock_buf+offset,req->buffer,  len);
		}
		
		end_request(req, 1);	/* wrap up, 0 = fail, 1 = success */
	}	
}
Example 7
static void Virtual_blkdev_do_request(struct request_queue *q)
{
	struct request *req;
	while ((req = elv_next_request(q)) != NULL) 
	{
		if ((req->sector + req->current_nr_sectors) << 9 > VIRTUAL_BLKDEV_BYTES)
		{
			printk(KERN_ERR VIRTUAL_BLKDEV_DISKNAME": bad request: block=%llu, count=%u\n",
				(unsigned long long)req->sector,
				req->current_nr_sectors);
			end_request(req, 0);
			continue;
		}/*endif*/
		switch (rq_data_dir(req)) 
		{
			case READ:
				memcpy(req->buffer,Virtual_blkdev_data + (req->sector << 9),
					req->current_nr_sectors << 9);
				end_request(req, 1);
				break;
			case WRITE:
				memcpy(Virtual_blkdev_data + (req->sector << 9),
					req->buffer, req->current_nr_sectors << 9);
				end_request(req, 1);
				break;
			default:
				/* No default because rq_data_dir(req) is 1 bit */
				break;
		}
	}/*endwhile*/
}
Example 8
static void do_z2_request(struct request_queue *q)
{
	struct request *req;
	while ((req = elv_next_request(q)) != NULL) {
		unsigned long start = req->sector << 9;
		unsigned long len  = req->current_nr_sectors << 9;

		if (start + len > z2ram_size) {
			printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
				req->sector, req->current_nr_sectors);
			end_request(req, 0);
			continue;
		}
		while (len) {
			unsigned long addr = start & Z2RAM_CHUNKMASK;
			unsigned long size = Z2RAM_CHUNKSIZE - addr;
			if (len < size)
				size = len;
			addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
			if (rq_data_dir(req) == READ)
				memcpy(req->buffer, (char *)addr, size);
			else
				memcpy((char *)addr, req->buffer, size);
			start += size;
			len -= size;
		}
		end_request(req, 1);
	}
}
Example 9
static void virtualblockdevice_do_request(struct request_queue *q)
{
	struct request *req;

//	printk(KERN_ALERT "VirtualBlockDevice: Entry virtualblockdevice_do_request !\n");

	while( NULL != ( req = elv_next_request( q ) ) ) {
		if( ( ( req->sector + req->current_nr_sectors ) << 9 ) > VIRTUALBLOCKDEVICE_DISK_CAPACITY ) {
			printk(KERN_ALERT "VirtualBlockDevice: bad request: start sector: = %llu\t sector count: = %lu \n", (unsigned long long) req->sector, (unsigned long)req->current_nr_sectors);
			end_request( req, 0 );
			continue;
		}
		printk(KERN_ALERT "VirtualBlockDevice: request: start sector: = %llu\t sector count: = %lu \n", (unsigned long long) req->sector, (unsigned long)req->current_nr_sectors);

		switch( rq_data_dir( req ) ) {
		case READ:
			memcpy( req->buffer, (virtualblockdevice_data + (req->sector << 9)), (req->current_nr_sectors << 9) );
			end_request( req, 1 );
			break;
		case WRITE:
			memcpy( (virtualblockdevice_data + (req->sector << 9)), req->buffer, (req->current_nr_sectors << 9) );
			end_request( req, 1 );
			break;
		default:
			printk(KERN_ALERT "VirtualBlockDevice: Unknown data direction !\n");
			break;
		}
	}
}
Example 10
static void do_ramblock_request(request_queue_t * q)
{
	static int r_cnt = 0;
	static int w_cnt = 0;
	struct request *req;
	
	//printk("do_ramblock_request %d\n", ++cnt);

	while ((req = elv_next_request(q)) != NULL) {
		/* Three elements of a data transfer: source, destination, length */
		/* source/destination: */
		unsigned long offset = req->sector * 512;

		/* destination/source: */
		// req->buffer

		/* length: */
		unsigned long len = req->current_nr_sectors * 512;

		if (rq_data_dir(req) == READ)
		{
			//printk("do_ramblock_request read %d\n", ++r_cnt);
			memcpy(req->buffer, ramblock_buf+offset, len);
		}
		else
		{
			//printk("do_ramblock_request write %d\n", ++w_cnt);
			memcpy(ramblock_buf+offset, req->buffer, len);
		}		
		
		end_request(req, 1);
	}
}
Example 11
static void do_ramblock_request(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) 
	{
		/* Three elements of a data transfer: source, destination, length */
		/* source/destination */
		unsigned long offset = req->sector << 9;

		/* destination/source */
		/* req->buffer */

		/* length */
		unsigned long len  = req->current_nr_sectors << 9;

		if (rq_data_dir(req) == READ)
		{
			memcpy(req->buffer, ramblock_buf+offset, len);
		}
		else
		{
			memcpy(ramblock_buf+offset, req->buffer, len);
		}

 		end_request(req, 1);
	}
	
}
Example 12
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret, count = 0;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
				count++;
				if (count > 100) {
					/* dequeue the request in the queue */
					printk(KERN_ERR "dequeue req\n");
					blkdev_dequeue_request(req);
					break;
				}
				count++;
			} while (ret);
		}
		return;
	}
	if (!mq->req)
		wake_up_process(mq->thread);
}
Example 13
static void mem_block_requeut_fn(struct request_queue *q)
{
	struct request *req = NULL;

	while (NULL != (req = elv_next_request(q)))
	{
		if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
		{
			end_request(req, 0);
			// return 0;
			LogPath();
			continue;
		}
		// Log("sector:%d,current_nr_sectors:%d",req->sector,req->current_nr_sectors);
		switch (rq_data_dir(req))
		{
			case READ:
			{
				memcpy(req->buffer, g_mem_buf + (req->sector << 9), req->current_nr_sectors << 9);
				end_request(req, 1);
				break;
			}
			case WRITE:
			{
				memcpy(g_mem_buf + (req->sector << 9), req->buffer, req->current_nr_sectors << 9);
				end_request(req, 1);
				break;
			}
			default:
				Log("[Error] Unknown request...");
				break;
				// return 0;
		}
	}
}
Example 14
static void mbox_tx_work(struct work_struct *work)
{
    int ret;
    struct request *rq;
    struct omap_mbox_queue *mq = container_of(work,
                struct omap_mbox_queue, work);
    struct omap_mbox *mbox = mq->queue->queuedata;
    struct request_queue *q = mbox->txq->queue;

    while (1) {
        spin_lock(q->queue_lock);
        rq = elv_next_request(q);
        spin_unlock(q->queue_lock);

        if (!rq)
            break;

        ret = __mbox_msg_send(mbox, (mbox_msg_t) rq->data, rq->special);
        if (ret) {
            enable_mbox_irq(mbox, IRQ_TX);
            return;
        }

        spin_lock(q->queue_lock);
        if (__blk_end_request(rq, 0, 0))
            BUG();
        spin_unlock(q->queue_lock);
    }
}
Example 15
/*
 * Message receiver(workqueue)
 */
static void mbox_rx_work(struct work_struct *work)
{
    struct omap_mbox_queue *mq =
            container_of(work, struct omap_mbox_queue, work);
    struct omap_mbox *mbox = mq->queue->queuedata;
    struct request_queue *q = mbox->rxq->queue;
    struct request *rq;
    mbox_msg_t msg;
    unsigned long flags;

    if (mbox->rxq->callback == NULL) {
        sysfs_notify(&mbox->dev.kobj, NULL, "mbox");
        return;
    }

    while (1) {
        spin_lock_irqsave(q->queue_lock, flags);
        rq = elv_next_request(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
        if (!rq)
            break;

        msg = (mbox_msg_t) rq->data;

        if (blk_end_request(rq, 0, 0))
            BUG();

        mbox->rxq->callback((void *)msg);
    }
}
Example 16
static void request_exemple(request_queue_t * rqueue)
{
	unsigned long secteur_debut;
	unsigned long nb_secteurs;
	struct request * rq;

	while ((rq = elv_next_request(rqueue)) != NULL) {
		if (! blk_fs_request(rq)) {
			end_request(rq, 0);
			continue;
		}
		
		/*
		 * The sector numbers given for the transfer refer to
		 * 512-byte sectors... -> convert.
		 */
		secteur_debut = rq->sector * 512 / lg_sect_exemple;
		nb_secteurs   = rq->current_nr_sectors * 512 / lg_sect_exemple;
		if (secteur_debut + nb_secteurs > nb_sect_exemple) {
			end_request(rq, 0);	/* out of range: fail the request */
			continue;
		}
		if (rq_data_dir(rq)) /* write */
			memmove(& data_exemple[secteur_debut * lg_sect_exemple],
			        rq->buffer,
			        nb_secteurs * lg_sect_exemple);
		else /* read */
			memmove(rq->buffer,
			        & data_exemple[secteur_debut * lg_sect_exemple],
			        nb_secteurs * lg_sect_exemple);
		end_request(rq, 1);
	}
}
Example 17
static void osprd_process_request_queue(request_queue_t *q)
{
	osprd_info_t *d = (osprd_info_t *) q->queuedata;
	struct request *req;

	while ((req = elv_next_request(q)) != NULL)
		osprd_process_request(d, req);
}
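Example 17 only drains the queue; the per-request work happens in osprd_process_request(), which is not part of the excerpt. A minimal sketch in the same style as the other RAM-disk handlers in this collection is shown below; the osprd_info_t field d->data (a byte buffer backing the device) is an assumption, not taken from the original source.

static void osprd_process_request(osprd_info_t *d, struct request *req)
{
	if (!blk_fs_request(req)) {
		end_request(req, 0);		/* only filesystem requests are handled */
		return;
	}

	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) {
		end_request(req, 0);		/* beyond end of device */
		return;
	}

	if (rq_data_dir(req) == WRITE)
		memcpy(d->data + (req->sector << 9), req->buffer,
		       req->current_nr_sectors << 9);
	else
		memcpy(req->buffer, d->data + (req->sector << 9),
		       req->current_nr_sectors << 9);

	end_request(req, 1);			/* whole chunk transferred */
}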
Example 18
static void start_request(struct floppy_state *fs)
{
	struct request *req;
	unsigned long x;

	if (fs->state == idle && fs->wanted) {
		fs->state = available;
		wake_up(&fs->wait);
		return;
	}
	while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
#if 0
		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
		       req->rq_disk->disk_name, req->cmd,
		       (long)req->sector, req->nr_sectors, req->buffer);
		printk("           rq_status=%d errors=%d current_nr_sectors=%ld\n",
		       req->rq_status, req->errors, req->current_nr_sectors);
#endif

		if (req->sector < 0 || req->sector >= fs->total_secs) {
			end_request(req, 0);
			continue;
		}
		if (req->current_nr_sectors == 0) {
			end_request(req, 1);
			continue;
		}
		if (fs->ejected) {
			end_request(req, 0);
			continue;
		}

		if (rq_data_dir(req) == WRITE) {
			if (fs->write_prot < 0)
				fs->write_prot = swim3_readbit(fs, WRITE_PROT);
			if (fs->write_prot) {
				end_request(req, 0);
				continue;
			}
		}

		/* Do not remove the cast. req->sector is now a sector_t and
		 * can be 64 bits, but it will never go past 32 bits for this
		 * driver anyway, so we can safely cast it down and not have
		 * to do a 64/32 division
		 */
		fs->req_cyl = ((long)req->sector) / fs->secpercyl;
		x = ((long)req->sector) % fs->secpercyl;
		fs->head = x / fs->secpertrack;
		fs->req_sector = x % fs->secpertrack + 1;
		fd_req = req;
		fs->state = do_transfer;
		fs->retries = 0;

		act(fs);
	}
}
Example 19
static void tbio_request(request_queue_t *q)
{
	struct request *req;

	while (( req = elv_next_request(q)) != NULL) {

		tbio_transfer(req , &Device);
		end_request(req , 1);
	}
}
Example 20
/* Get the next read/write request; ending requests that we don't handle */
struct request *ace_get_next_request(struct request_queue * q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		if (blk_fs_request(req))
			break;
		end_request(req, 0);
	}
	return req;
}
Example 21
static void start_request(struct floppy_state *fs)
{
	struct request *req;
	unsigned long x;

	if (fs->state == idle && fs->wanted) {
		fs->state = available;
		wake_up(&fs->wait);
		return;
	}
	while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
#if 0
		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
		       req->rq_disk->disk_name, req->cmd,
		       req->sector, req->nr_sectors, req->buffer);
		printk("           rq_status=%d errors=%d current_nr_sectors=%ld\n",
		       req->rq_status, req->errors, req->current_nr_sectors);
#endif

		if (req->sector < 0 || req->sector >= fs->total_secs) {
			end_request(req, 0);
			continue;
		}
		if (req->current_nr_sectors == 0) {
			end_request(req, 1);
			continue;
		}
		if (fs->ejected) {
			end_request(req, 0);
			continue;
		}

		if (rq_data_dir(req) == WRITE) {
			if (fs->write_prot < 0)
				fs->write_prot = swim3_readbit(fs, WRITE_PROT);
			if (fs->write_prot) {
				end_request(req, 0);
				continue;
			}
		}

		fs->req_cyl = req->sector / fs->secpercyl;
		x = req->sector % fs->secpercyl;
		fs->head = x / fs->secpertrack;
		fs->req_sector = x % fs->secpertrack + 1;
		fd_req = req;
		fs->state = do_transfer;
		fs->retries = 0;

		act(fs);
	}
}
Example 22
/*
 * The simple form of the request function.
 */
static void ubiblk_request(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		struct ubiblk_dev *dev = req->rq_disk->private_data;
		if(! blk_fs_request(req)) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			end_request(req, 0);
			continue;
		}
		ubiblk_transfer(dev, req->sector, req->current_nr_sectors,
				req->buffer, rq_data_dir(req));
		end_request(req, 1);
	}
}
Example 23
static void simpleblockrequest(struct request_queue *q)
{
    struct request *req;
	
    while ((req = elv_next_request(q)) != NULL) 
	{
		if (! blk_fs_request(req)) 
		{
			printk (KERN_NOTICE "Skip non-CMD request\n");
			end_request(req, 0);
			continue;
		}
		simpleblocktransfer(&Device, req->sector, req->current_nr_sectors,
			req->buffer, rq_data_dir(req));
		end_request(req, 1);
    }
}
Example 24
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * Set iothread to ensure that we aren't put to sleep by
	 * the process freezing.  We handle suspension ourselves.
	 */
	current->flags |= PF_MEMALLOC|PF_NOFREEZE;

	daemonize("mmcqd");

	complete(&mq->thread_complete);

	down(&mq->thread_sem);
	add_wait_queue(&mq->thread_wq, &wait);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			mq->req = req = elv_next_request(q);
		spin_unlock(q->queue_lock);

		if (!req) {
			if (mq->flags & MMC_QUEUE_EXIT)
				break;
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	remove_wait_queue(&mq->thread_wq, &wait);
	up(&mq->thread_sem);

	complete_and_exit(&mq->thread_complete, 0);
	return 0;
}
Example 25
static void read_intr(void)
{
	struct request *req;
	int i, retries = 100000;

	do {
		i = (unsigned) inb_p(HD_STATUS);
		if (i & BUSY_STAT)
			continue;
		if (!OK_STATUS(i))
			break;
		if (i & DRQ_STAT)
			goto ok_to_read;
	} while (--retries > 0);
	dump_status("read_intr", i);
	bad_rw_intr();
	hd_request();
	return;
ok_to_read:
	req = CURRENT;
	insw(HD_DATA,req->buffer,256);
	req->sector++;
	req->buffer += 512;
	req->errors = 0;
	i = --req->nr_sectors;
	--req->current_nr_sectors;
#ifdef DEBUG
	printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
		req->rq_disk->disk_name, req->sector, req->nr_sectors,
		req->buffer+512);
#endif
	if (req->current_nr_sectors <= 0)
		end_request(req, 1);
	if (i > 0) {
		SET_HANDLER(&read_intr);
		return;
	}
	(void) inb_p(HD_STATUS);
#if (HD_DELAY > 0)
	last_req = read_timer();
#endif
	if (elv_next_request(QUEUE))
		hd_request();
	return;
}
Example 26
/*
 * The simple form of the request function.
 */
static void sbull_request(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;
		if (! blk_fs_request(req)) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			end_request(req, 0);
			continue;
		}
    //    	printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
    //    			dev - Devices, rq_data_dir(req),
    //    			req->sector, req->current_nr_sectors,
    //    			req->flags);
		sbull_transfer(dev, req->sector, req->current_nr_sectors,
				req->buffer, rq_data_dir(req));
		end_request(req, 1);
	}
}
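Example 26 is the sbull request function from LDD3; the data movement itself happens in sbull_transfer(), which the excerpt does not include. The sketch below shows the kind of helper it relies on, closely modeled on the book's version; KERNEL_SECTOR_SIZE (512) and the sbull_dev fields size (device length in bytes) and data (vmalloc'ed backing store) are assumptions here.

static void sbull_transfer(struct sbull_dev *dev, unsigned long sector,
			   unsigned long nsect, char *buffer, int write)
{
	unsigned long offset = sector * KERNEL_SECTOR_SIZE;
	unsigned long nbytes = nsect * KERNEL_SECTOR_SIZE;

	if ((offset + nbytes) > dev->size) {
		printk(KERN_NOTICE "Beyond-end %s (%ld %ld)\n",
		       write ? "write" : "read", offset, nbytes);
		return;
	}

	if (write)
		memcpy(dev->data + offset, buffer, nbytes);	/* copy into the backing store */
	else
		memcpy(buffer, dev->data + offset, nbytes);	/* copy out to the request buffer */
}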
Example 27
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		while ((req = elv_next_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
Example 28
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
Example 29
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
Example 30
static void my_request (struct request_queue *q)
{
    struct request *rq;
    int size;
    char *ptr;
    unsigned nr_sectors, sector;

    printk (KERN_INFO "entering request routine\n");

    while ((rq = elv_next_request (q))) {
        if (!blk_fs_request (rq)) {
            printk (KERN_WARNING "This was not a normal fs request, skipping\n");
            end_request (rq, 0);
            continue;
        }

        nr_sectors = rq->current_nr_sectors;
        sector = rq->sector;
        ptr = my_dev + sector * sector_size;
        size = nr_sectors * sector_size;

        if ((ptr + size) > (my_dev + disk_size)) {
            printk (KERN_WARNING " tried to go past end of device\n");
            end_request (rq, 0);
            continue;
        }

        if (rq_data_dir (rq)) {
            printk (KERN_INFO "writing at sector %u, %u sectors\n",
                    sector, nr_sectors);
            memcpy (ptr, rq->buffer, size);
        } else {
            printk (KERN_INFO "reading at sector %u, %u sectors\n",
                    sector, nr_sectors);
            memcpy (rq->buffer, ptr, size);
        }

        end_request (rq, 1);
    }
    printk (KERN_INFO "leaving request\n");
}