/**ltl
 * Function:
 * Parameters:
 * Return value:
 * Note: this function must not call blk_end_request_all(), because
 *       blk_end_request_all() acquires the request queue lock itself,
 *       so calling it here would deadlock.
 *       Q: why does calling blk_end_request_all() deadlock?
 *       A: the block layer invokes this request function with
 *          q->queue_lock already held; blk_end_request_all() tries to
 *          take that same non-recursive spinlock again, so the
 *          lock-held variant __blk_end_request_all() must be used
 *          instead.
 */
static void mem_block_request_fn(struct request_queue *q)
{
	struct request* req = NULL;
	while (NULL != (req = blk_fetch_request(q)))
	{
		if(blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(req->rq_disk))
		{
			__blk_end_request_all(req, -EIO); /* must not be replaced with blk_end_request_all() */
			continue;
		}

		switch(rq_data_dir(req))
		{
			case READ:
                {
				    memcpy(req->buffer,g_mem_buf + (blk_rq_pos(req) << 9),blk_rq_cur_bytes(req));
				    __blk_end_request_all(req, 0); /* must not be replaced with blk_end_request_all() */
				    break;
                }
			case WRITE:
                {
				    memcpy(g_mem_buf + (blk_rq_pos(req) << 9),req->buffer,blk_rq_cur_bytes(req));
				    __blk_end_request_all(req, 0); /* must not be replaced with blk_end_request_all() */
				    break;
                }
			default:
				__blk_end_request_all(req, -EIO); /* must not be replaced with blk_end_request_all() */
				break;
		}
	}
  //  BLK_PLog("req:0x%p",req);
}
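For context, here is a minimal sketch (not part of the original driver) of how such a request function is registered with the legacy, pre-blk-mq block layer. blk_init_queue() couples the handler to a driver-supplied spinlock, which the core holds across every call into the handler; that lock is exactly why the __blk_end_request_* variants are required above. The names mem_block_lock, mem_block_queue, and the init function are illustrative:

static DEFINE_SPINLOCK(mem_block_lock);	/* becomes q->queue_lock */
static struct request_queue *mem_block_queue;

static int __init mem_block_init(void)
{
	/* The block core acquires mem_block_lock before invoking
	 * mem_block_request_fn(), so the handler always runs with the
	 * queue lock held; blk_end_request_all() would try to take the
	 * same non-recursive lock again and deadlock. */
	mem_block_queue = blk_init_queue(mem_block_request_fn, &mem_block_lock);
	if (!mem_block_queue)
		return -ENOMEM;
	return 0;
}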
Example #2
/*
 * simp_blkdev_do_request
 */
static void simp_blkdev_do_request(struct request_queue *q)
{
	struct request *req;
	struct req_iterator ri;
	struct bio_vec *bvec;
	char *disk_mem;
	char *buffer;
		
	while ((req = blk_fetch_request(q)) != NULL) {
		if ((blk_rq_pos(req) << 9) + blk_rq_cur_bytes(req)
			> SIMP_BLKDEV_BYTES) {
			printk(KERN_ERR SIMP_BLKDEV_DISKNAME
				": bad request: block=%llu, count=%u\n",
				(unsigned long long)blk_rq_pos(req),
				blk_rq_cur_bytes(req));
				blk_end_request_all(req, -EIO);
			continue;
		}
		
		disk_mem = simp_blkdev_data + (blk_rq_pos(req) << 9);
		switch (rq_data_dir(req)) {
		case READ:	
			rq_for_each_segment(bvec, req, ri)
			{
				buffer = kmap(bvec->bv_page) + bvec->bv_offset;
				memcpy(buffer, disk_mem, bvec->bv_len);
				kunmap(bvec->bv_page);
				disk_mem += bvec->bv_len;
			}
			
			/*memcpy(req->buffer,
			simp_blkdev_data + (blk_rq_pos(req) << 9),
			blk_rq_cur_bytes(req));*/
			__blk_end_request_all(req, 0);
			break;
		case WRITE:		
			rq_for_each_segment(bvec, req, ri)
			{
				buffer = kmap(bvec->bv_page) + bvec->bv_offset;
				memcpy(disk_mem, buffer, bvec->bv_len);
				kunmap(bvec->bv_page);
				disk_mem += bvec->bv_len;
			}
			/*memcpy(simp_blkdev_data + (blk_rq_pos(req) << 9),
			req->buffer, blk_rq_cur_bytes(req));*/
			__blk_end_request_all(req, 0);
			break;
		default:
			/* nothing to do: rq_data_dir(req) is only 1 bit, so this is unreachable */
			break;
		}
	}
}
Example #3
void ramhd_req_func(struct request_queue* q) {
    struct request* req;
    RAMHD_DEV* pdev;
    char* pData;
    unsigned long addr, size, start;
    req = blk_fetch_request(q);

    while (req) {
        start = blk_rq_pos(req); // The sector cursor of the current request
        pdev = (RAMHD_DEV*)req->rq_disk->private_data;
        pData = pdev->data;
        addr = (unsigned long)pData + start * RAMHD_SECTOR_SIZE;
        size = blk_rq_cur_bytes(req);

        if (rq_data_dir(req) == READ) {
            memcpy(req->buffer, (char*)addr, size);
        } else {
            memcpy((char*)addr, req->buffer, size);
        }

        if (!__blk_end_request_cur(req, 0)) {
            req = blk_fetch_request(q);
        }
    }
}
Example #4
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
#ifndef CONFIG_ARCH_EMXX
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
#endif
		while ((req = elv_next_request(q)) != NULL) {
#ifdef CONFIG_ARCH_EMXX
			req->cmd_flags |= REQ_QUIET;
#endif
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
Example #5
static void ramblock_do_request(struct request_queue * q)
{
	struct request *req;
	printk("do:ramblock_do_request\n");
	req = blk_fetch_request(q);
	while (req) {
		/* source or destination */
		unsigned long offset = blk_rq_pos(req) * 512;

		/* destination or source: req->buffer */

		/* length */
		unsigned long len  = blk_rq_cur_bytes(req);
		if (rq_data_dir(req) == READ)
			memcpy(req->buffer, ram_buff+offset, len);
		else
			memcpy(ram_buff+offset, req->buffer, len);

		/* wrap up, 0 = success, -errno = fail */
		if (!__blk_end_request_cur(req, 0))
			req = blk_fetch_request(q);
	}

}
Example #6
static void do_ramblk_request(struct request_queue *q )
{
	struct request *req;
//	static volatile int r_cnt = 0;
//	static volatile int w_cnt = 0;
	//printk("ramblk_request_fn %d.\n",cnt++);
	req = blk_fetch_request(q);
	while (req) {
		unsigned long start = blk_rq_pos(req) << 9;
		unsigned long len = blk_rq_cur_bytes(req);
		int err = 0;
//		printk("len=%d.\n", len);

		if (start + len > RAMBLK_SIZE) {
			printk("ramblk: request exceeds RAMBLK_SIZE\n");
			err = -EIO;
			goto done;
		}

		if (rq_data_dir(req) == READ)
			memcpy(req->buffer, (char *)(start + ramblk_buf), len);
		else
			memcpy((char *)(start + ramblk_buf), req->buffer, len);

done:
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
	
}
Example #7
static void do_z2_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req) {
		unsigned long start = blk_rq_pos(req) << 9;
		unsigned long len  = blk_rq_cur_bytes(req);
		int err = 0;

		if (start + len > z2ram_size) {
			printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
				blk_rq_pos(req), blk_rq_cur_sectors(req));
			err = -EIO;
			goto done;
		}
		while (len) {
			unsigned long addr = start & Z2RAM_CHUNKMASK;
			unsigned long size = Z2RAM_CHUNKSIZE - addr;
			if (len < size)
				size = len;
			addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
			if (rq_data_dir(req) == READ)
				memcpy(req->buffer, (char *)addr, size);
			else
				memcpy((char *)addr, req->buffer, size);
			start += size;
			len -= size;
		}
	done:
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
Example #8
static void mini2440_ramdisk_request(struct request_queue *q)
{    
	static int r_cnt = 0;
	static int w_cnt = 0;
	struct request *req;
	
	req = blk_fetch_request(q);
	while (req) {
		/* the three elements of a data transfer: source, destination, length */
		/* source/destination: */
		unsigned long offset = blk_rq_pos(req) << 9;

		/* length: */
		unsigned long len = blk_rq_cur_bytes(req);

		if (rq_data_dir(req) == READ) {
			printk("[RAMDISK]ramdisk_request read %d\n", ++r_cnt);
			memcpy(req->buffer, mini2440_ramdisk_devp->ramdisk_buffer+offset, len);
		}else {
			printk("[RAMDISK]ramdisk_request write %d\n", ++w_cnt);
			memcpy(mini2440_ramdisk_devp->ramdisk_buffer+offset, req->buffer, len);
		}		
		
		if (!__blk_end_request_cur(req, 0))
			req = blk_fetch_request(q);
		else
			printk("[RAMDISK]request not finished, servicing next segment\n");
	}
}
Example #9
static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
{
	struct request *req = fs->cur_req;
	int rc;

	swim3_dbg("  end request, err=%d nr_bytes=%d, cur_req=%p\n",
		  err, nr_bytes, req);

	if (err)
		nr_bytes = blk_rq_cur_bytes(req);
	rc = __blk_end_request(req, err, nr_bytes);
	if (rc)
		return true;
	fs->cur_req = NULL;
	return false;
}
Example #10
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		while ((req = elv_next_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
Example #11
static int nbdx_request(struct request *req, struct nbdx_queue *xq)
{
	struct nbdx_file *xdev;
	unsigned long start = blk_rq_pos(req) << NBDX_SECT_SHIFT;
	unsigned long len  = blk_rq_cur_bytes(req);
	int write = rq_data_dir(req) == WRITE;
	int err;
	void* buffer = bio_data(req->bio);

	pr_debug("%s called\n", __func__);

	xdev = req->rq_disk->private_data;

	err = nbdx_transfer(xdev, buffer, start, len, write, req, xq);
	if (unlikely(err))
		pr_err("transfer failed for req %p\n", req);

	return err;

}
Example #12
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}
Example #13
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;
		blk_start_request(req);
		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			__blk_end_request_all(req, -EIO);
			continue;
		}
    //    	printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
    //    			dev - Devices, rq_data_dir(req),
    //    			req->sector, req->current_nr_sectors,
    //    			req->flags);
		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_bytes(req),
                               req->buffer, rq_data_dir(req));
		__blk_end_request_all(req, 0);
	}
}
Example #14
File: test.c  Project: sktwj/var
//Fetch request objects from the request queue, read the operation parameters
//from each one (the starting sector and the byte count of the read/write),
//and then perform the requested operation on the hardware
//This function is called automatically by the blk driver framework; when it
//runs is decided by the elevator (I/O scheduler)
static void do_ldm_req(struct request_queue *q)
{
	//fetch one request object from the request queue
	struct request *req = blk_fetch_request(q);
	while (req) {
		//which sector the operation starts at, converted to a byte offset
		u32 start = blk_rq_pos(req) * SECTOR_SIZE;
		//byte count of the current request operation
		u32 len = blk_rq_cur_bytes(req);

		//check whether this request overruns the device capacity
		int err = 0;
		if (start + len > DEV_SIZE) {
			printk(KERN_ERR "request region is out of device capacity\n");
			err = -EIO;
			goto err_request;
		}

		//rq_data_dir() gives the direction of the current request
		//tip: add printk calls around the memcpy to observe when reads and
		//writes are actually scheduled
		//data is transferred from the kernel to the application
		if (rq_data_dir(req) == READ) {
			memcpy(req->buffer, (u8*)ldm.addr + start, len);
			printk("read from %d, size %d\n", start, len);
		} else { //data is transferred from the application into the kernel and written
			memcpy((u8*)ldm.addr + start, req->buffer, len);
			printk("write from %d, size %d\n", start, len);
		}

		//__blk_end_request_cur: returns false once all work for the current
		//req is finished; we then call blk_fetch_request to take a new
		//request off the queue, and if none is available req becomes NULL
		//and the loop exits.
		//A true return means the current req is not finished yet, so the
		//loop keeps servicing it.
		//The err argument can override the return value on its own: when
		//err < 0 the function returns false, so on any other failure err can
		//be used to finish the current req and move on to a new one.
err_request:
		if (!__blk_end_request_cur(req, err)) {
			req = blk_fetch_request(q);
		}
	}
}
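The comments above note that do_ldm_req() is invoked by the block framework rather than called directly. As a rough sketch of that wiring under the same legacy API (gendisk fields such as major, first_minor, fops and disk_name are omitted for brevity; ldm_lock and the init function are illustrative, while DEV_SIZE and SECTOR_SIZE come from the example):

static DEFINE_SPINLOCK(ldm_lock);

static int __init ldm_blk_init(void)
{
	struct request_queue *q;
	struct gendisk *disk;

	/* couple do_ldm_req() to the queue; the core calls it with
	 * ldm_lock held, at times chosen by the elevator */
	q = blk_init_queue(do_ldm_req, &ldm_lock);
	if (!q)
		return -ENOMEM;

	disk = alloc_disk(1);		/* single minor, no partitions */
	if (!disk) {
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	disk->queue = q;
	set_capacity(disk, DEV_SIZE / SECTOR_SIZE);	/* capacity in sectors */
	add_disk(disk);			/* framework may now call do_ldm_req() */
	return 0;
}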
Example #15
static int tbio_transfer(struct request *req, struct tbio_device *dev)
{
	unsigned int i = 0, offset = 0;
	char *buf;
	unsigned long flags;
	size_t size;

	struct bio_vec *bv;
	struct req_iterator iter;

	size = blk_rq_cur_bytes(req);
	prk_info("bio req of size %zu:", size);
	offset = blk_rq_pos(req) * 512;

	rq_for_each_segment(bv, req, iter) {
		size = bv->bv_len;
		prk_info("%s bio(%u), segs(%u) sect(%u) pos(%lu) off(%u)",
			(bio_data_dir(iter.bio) == READ) ? "READ" : "WRITE",
			i, bio_segments(iter.bio), bio_sectors(iter.bio),
			iter.bio->bi_sector, offset);

		if (get_capacity(req->rq_disk) * 512 < offset) {
			prk_info("Error, small capacity %zu, offset %u",
				get_capacity(req->rq_disk) * 512,
				offset);
			continue;
		}

		buf = bvec_kmap_irq(bv, &flags);
		if (bio_data_dir(iter.bio) == WRITE)
			memcpy(dev->data + offset, buf, size);
		else
			memcpy(buf, dev->data + offset, size);
		offset += size;
		flush_kernel_dcache_page(bv->bv_page);
		bvec_kunmap_irq(buf, &flags);
		++i;
	}

	return 0;
}
Example #16
static void do_z2_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req) {
		unsigned long start = blk_rq_pos(req) << 9;
		unsigned long len  = blk_rq_cur_bytes(req);
		blk_status_t err = BLK_STS_OK;

		if (start + len > z2ram_size) {
			pr_err(DEVICE_NAME ": bad access: block=%llu, "
			       "count=%u\n",
			       (unsigned long long)blk_rq_pos(req),
			       blk_rq_cur_sectors(req));
			err = BLK_STS_IOERR;
			goto done;
		}
		while (len) {
			unsigned long addr = start & Z2RAM_CHUNKMASK;
			unsigned long size = Z2RAM_CHUNKSIZE - addr;
			void *buffer = bio_data(req->bio);

			if (len < size)
				size = len;
			addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
			if (rq_data_dir(req) == READ)
				memcpy(buffer, (char *)addr, size);
			else
				memcpy((char *)addr, buffer, size);
			start += size;
			len -= size;
		}
	done:
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
Example #17
int remove_all_req(struct mmc_queue *mq)
{
	int i = 0;
	struct request_queue *q;
	struct request *req = NULL;

	if (NULL == mq)
		return 0;

	q = mq->queue;	/* only dereference mq after the NULL check */
	spin_lock_irq(q->queue_lock);
	while ((req = blk_fetch_request(q)) != NULL) {
		int ret = 0;
		do {
			req->cmd_flags |= REQ_QUIET;
			ret = __blk_end_request(req, -EIO,
						blk_rq_cur_bytes(req));
		} while (ret);
		i ++;
	}
	spin_unlock_irq(q->queue_lock);

	printk(KERN_ERR"rms:%s %d req %d\n", __FUNCTION__, __LINE__, i);
	return i;
}
Example #18
static bool hd_end_request_cur(int err)
{
	return hd_end_request(err, blk_rq_cur_bytes(hd_req));
}
Example #19
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;
#if 0
	if (!mq) {
#else
	//With the USB cable attached (charging), removing and re-inserting the
	//card occasionally hung the system, with mq->thread observed to be
	//NULL; modified by xbw
	if (!mq || !mq->thread) {
#endif
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_ANY; /* was BLK_BOUNCE_HIGH */
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;
 cleanup_queue:
 	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
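
/*
 * Editorial sketch (not in the original source): a typical caller pairs
 * mmc_queue_suspend()/mmc_queue_resume() around host power transitions.
 * A per-device mmc_blk_data structure with an embedded mmc_queue is
 * assumed here, mirroring the kernel's mmc_block driver; the function
 * names are illustrative.
 */
static int example_mmc_blk_suspend(struct mmc_blk_data *md)
{
	if (md)
		mmc_queue_suspend(&md->queue);	/* stop queue, wait for worker */
	return 0;
}

static int example_mmc_blk_resume(struct mmc_blk_data *md)
{
	if (md)
		mmc_queue_resume(&md->queue);	/* restart queue, wake worker */
	return 0;
}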

static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = sg_virt(dst);
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = sg_virt(src);
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}
Example #20
static bool swim3_end_request_cur(int err)
{
	return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
}
Example #21
static void ace_fsm_dostate(struct ace_device *ace)
{
	struct request *req;
	u32 status;
	u16 val;
	int count;

#if defined(DEBUG)
	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
		ace->fsm_state, ace->id_req_count);
#endif

	switch (ace->fsm_state) {
	case ACE_FSM_STATE_IDLE:
		/* See if there is anything to do */
		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
			ace->fsm_iter_num++;
			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
			mod_timer(&ace->stall_timer, jiffies + HZ);
			if (!timer_pending(&ace->stall_timer))
				add_timer(&ace->stall_timer);
			break;
		}
		del_timer(&ace->stall_timer);
		ace->fsm_continue_flag = 0;
		break;

	case ACE_FSM_STATE_REQ_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Already have the lock, jump to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Request the lock */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
		break;

	case ACE_FSM_STATE_WAIT_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* got the lock; move to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* wait a bit for the lock */
		ace_fsm_yield(ace);
		break;

	case ACE_FSM_STATE_WAIT_CFREADY:
		status = ace_in32(ace, ACE_STATUS);
		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
		    (status & ACE_STATUS_CFBSY)) {
			/* CF card isn't ready; it needs to be polled */
			ace_fsm_yield(ace);
			break;
		}

		/* Device is ready for command; determine what to do next */
		if (ace->id_req_count)
			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
		else
			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
		break;

	case ACE_FSM_STATE_IDENTIFY_PREPARE:
		/* Send identify command */
		ace->fsm_task = ACE_TASK_IDENTIFY;
		ace->data_ptr = &ace->cf_id;
		ace->data_count = ACE_BUF_PER_SECTOR;
		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* irq handler takes over from this point; wait for the
		 * transfer to complete */
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
		ace_fsm_yieldirq(ace);
		break;

	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->data_count);
			ace_fsm_yield(ace);
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			ace_fsm_yield(ace);
			break;
		}

		/* Transfer the next buffer */
		ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* transfer finished; kick state machine */
		dev_dbg(ace->dev, "identify finished\n");
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
		break;

	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
		ace_fix_driveid(&ace->cf_id);
		ace_dump_mem(&ace->cf_id, 512);	/* Debug: Dump out disk ID */

		if (ace->data_result) {
			/* Error occurred, disable the disk */
			ace->media_change = 1;
			set_capacity(ace->gd, 0);
			dev_err(ace->dev, "error fetching CF id (%i)\n",
				ace->data_result);
		} else {
			ace->media_change = 0;

			/* Record disk parameters */
			set_capacity(ace->gd, ace->cf_id.lba_capacity);
			dev_info(ace->dev, "capacity: %i sectors\n",
				 ace->cf_id.lba_capacity);
		}

		/* We're done, drop to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = ace->data_result;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
		break;

	case ACE_FSM_STATE_REQ_PREPARE:
		req = ace_get_next_request(ace->queue);
		if (!req) {
			ace->fsm_state = ACE_FSM_STATE_IDLE;
			break;
		}

		/* Okay, it's a data request, set it up for transfer */
		dev_dbg(ace->dev,
			"request: sec=%lx hcnt=%lx, ccnt=%x, dir=%i\n",
			req->sector, req->hard_nr_sectors,
			req->current_nr_sectors, rq_data_dir(req));

		ace->req = req;
		ace->data_ptr = req->buffer;
		ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
		ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);

		count = req->hard_nr_sectors;
		if (rq_data_dir(req)) {
			/* Kick off write request */
			dev_dbg(ace->dev, "write data\n");
			ace->fsm_task = ACE_TASK_WRITE;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_WRITE_DATA);
		} else {
			/* Kick off read request */
			dev_dbg(ace->dev, "read data\n");
			ace->fsm_task = ACE_TASK_READ;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_READ_DATA);
		}

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* Move to the transfer state.  The systemace will raise
		 * an interrupt once there is something to do
		 */
		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
		if (ace->fsm_task == ACE_TASK_READ)
			ace_fsm_yieldirq(ace);	/* wait for data ready */
		break;

	case ACE_FSM_STATE_REQ_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev,
				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->req->current_nr_sectors * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			dev_dbg(ace->dev,
				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->req->current_nr_sectors * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer the next buffer */
		if (ace->fsm_task == ACE_TASK_WRITE)
			ace->reg_ops->dataout(ace);
		else
			ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* bio finished; is there another one? */
		if (__blk_end_request(ace->req, 0,
					blk_rq_cur_bytes(ace->req))) {
			/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
			 *      ace->req->hard_nr_sectors,
			 *      ace->req->current_nr_sectors);
			 */
			ace->data_ptr = ace->req->buffer;
			ace->data_count = ace->req->current_nr_sectors * 16;
			ace_fsm_yieldirq(ace);
			break;
		}

		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
		break;

	case ACE_FSM_STATE_REQ_COMPLETE:
		ace->req = NULL;

		/* Finished request; go to idle state */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;

	default:
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;
	}
}
Example #22
static void do_tzmem_blk_request(struct request_queue *q)
{
	struct request *req;
	uint32_t i;

#ifdef MTEE_TZMEM_DBG
	pr_warn("====> do_tzmem_blk_request\n");
#endif

	req = blk_fetch_request(q);
	while (req) {
		unsigned long start = blk_rq_pos(req) << 9;
		unsigned long len  = blk_rq_cur_bytes(req);
		int err = 0;
		struct gendisk *disk = req->rq_disk;
		struct tzmem_diskinfo_s *diskInfo = (struct tzmem_diskinfo_s *)disk->private_data;
		KREE_SESSION_HANDLE session;

		session = diskInfo->session;

#ifdef MTEE_TZMEM_DBG
		pr_warn("====> 0x%x 0x%x\n", (uint32_t) session, diskInfo->size);
#endif

		if ((start + len > diskInfo->size) || (start > diskInfo->size) || (len > diskInfo->size)) {
			err = -EIO;
			goto done;
		}

		if (rq_data_dir(req) == READ) {
#ifdef MTEE_TZMEM_DBG
			pr_warn("====> do_tzmem_blk_request: read = 0x%x, 0x%x\n", (uint32_t) start, (uint32_t) len);
#endif
			for (i = 0; i < len / KREE_RELEASECM_MAX_SIZE; i++) {
				KREE_ReadSecurechunkmem((KREE_SESSION_HANDLE) session,
					start + i * KREE_RELEASECM_MAX_SIZE,
					KREE_RELEASECM_MAX_SIZE,
					req->buffer + i * KREE_RELEASECM_MAX_SIZE);
			}
			if (len % KREE_RELEASECM_MAX_SIZE) {
				KREE_ReadSecurechunkmem((KREE_SESSION_HANDLE) session,
					start + i * KREE_RELEASECM_MAX_SIZE,
					len % KREE_RELEASECM_MAX_SIZE,
					req->buffer + i * KREE_RELEASECM_MAX_SIZE);
			}
		} else {
#ifdef MTEE_TZMEM_DBG
			pr_warn("====> do_tzmem_blk_request: write = 0x%x, 0x%x\n", (uint32_t) start, (uint32_t) len);
#endif
			for (i = 0; i < len / KREE_RELEASECM_MAX_SIZE; i++) {
				KREE_WriteSecurechunkmem((KREE_SESSION_HANDLE) session,
					start + i * KREE_RELEASECM_MAX_SIZE,
					KREE_RELEASECM_MAX_SIZE,
					req->buffer + i * KREE_RELEASECM_MAX_SIZE);
			}
			if (len % KREE_RELEASECM_MAX_SIZE) {
				KREE_WriteSecurechunkmem((KREE_SESSION_HANDLE) session,
					start + i * KREE_RELEASECM_MAX_SIZE,
					len % KREE_RELEASECM_MAX_SIZE,
					req->buffer + i * KREE_RELEASECM_MAX_SIZE);
			}

		}

done:
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}