Example #1
static int htifblk_segment(struct htifblk_device *dev,
	struct request *req)
{
	static struct htifblk_request pkt __aligned(HTIF_ALIGN);
	u64 offset, size, end;
	unsigned long cmd;

	offset = (blk_rq_pos(req) << SECTOR_SIZE_SHIFT);
	size = (blk_rq_cur_sectors(req) << SECTOR_SIZE_SHIFT);

	end = offset + size;
	if (unlikely(end < offset || end > dev->size)) {
		dev_err(&dev->dev->dev, "out-of-bounds access:"
			" offset=%llu size=%llu\n", offset, size);
		return -EINVAL;
	}

	rmb();
	pkt.addr = __pa(req->buffer);
	pkt.offset = offset;
	pkt.size = size;
	pkt.tag = dev->tag;

	switch (rq_data_dir(req)) {
	case READ:
		cmd = HTIF_CMD_READ;
		break;
	case WRITE:
		cmd = HTIF_CMD_WRITE;
		break;
	default:
		return -EINVAL;
	}

	dev->req = req;
	htif_tohost(dev->dev->index, cmd, __pa(&pkt));
	return 0;
}
Example #2
File: test.c Project: sktwj/var
//Fetch a request object from the request queue, read the operation parameters from it (the starting sector and the byte count of the read/write), and then carry the requested operation out on the hardware
//This function is called automatically by the blk driver framework; when it runs is decided by the elevator (I/O scheduler) algorithm
static void do_ldm_req(struct request_queue *q)
{
	//fetch one request object from the request queue
	struct request *req = blk_fetch_request(q);
	while (req) {
		//where the operation starts (start sector * SECTOR_SIZE)
		u32 start = blk_rq_pos(req) * SECTOR_SIZE;
		//number of bytes in the current request segment
		u32 len = blk_rq_cur_bytes(req);

		//check whether this request goes beyond the device capacity
		int err = 0;
		if (start + len > DEV_SIZE) {
			printk(KERN_ERR "request region is out of device capacity\n");
			err = -EIO;
			goto err_request;
		}

		//rq_data_dir() gives the direction of the current request
		//adding printk() before and after the memcpy makes it easier to observe when read/write operations get scheduled
		//data is transferred from the kernel to the application
		if (rq_data_dir(req) == READ) {
			memcpy(req->buffer, (u8*)ldm.addr + start, len);
			printk("read from %d, size %d\n", start, len);
		} else { //data is transferred from user space into the kernel and written
			memcpy((u8*)ldm.addr + start, req->buffer, len);
			printk("write from %d, size %d\n", start, len);
		}

		//__blk_end_request_cur: returning false means all work for the current req is finished, so below we call blk_fetch_request() again to take a new request from the queue; if none is available, req becomes NULL and the loop exits;
		//returning true means the current req is not finished yet, so the loop keeps processing it
		//the err argument can change __blk_end_request_cur()'s return value on its own: when err < 0 the function returns false. So when some other error occurs, err can be used to finish the current req and fetch a new request from the queue
err_request:
		if (!__blk_end_request_cur(req, err)) {
			req = blk_fetch_request(q);
		}
	}
}
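The comments above note that do_ldm_req() is called automatically by the blk driver framework. For context, here is a minimal registration sketch using the legacy single-queue API; the names ldm_queue, ldm_lock, ldm_gd and ldm_setup_queue() are assumptions made for illustration only and are not part of the example project.

static struct request_queue *ldm_queue;
static DEFINE_SPINLOCK(ldm_lock);

static int ldm_setup_queue(struct gendisk *ldm_gd)
{
	/* hand do_ldm_req() to the block layer; the elevator decides when it runs */
	ldm_queue = blk_init_queue(do_ldm_req, &ldm_lock);
	if (!ldm_queue)
		return -ENOMEM;

	ldm_gd->queue = ldm_queue;
	return 0;
}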
Example #3
/**
 * row_reinsert_req() - Reinsert request back to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 * Reinsert the given request back to the queue it was
 * dispatched from as if it was never dispatched.
 *
 * Returns 0 on success, error code otherwise
 */
static int row_reinsert_req(struct request_queue *q,
			    struct request *rq)
{
	struct row_data    *rd = q->elevator->elevator_data;
	struct row_queue   *rqueue = RQ_ROWQ(rq);

	/* Verify rqueue is legitimate */
	if (rqueue->prio >= ROWQ_MAX_PRIO) {
		pr_err("\n\nROW BUG: row_reinsert_req() rqueue->prio = %d\n",
			   rqueue->prio);
		blk_dump_rq_flags(rq, "");
		return -EIO;
	}

	list_add(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;

	row_log_rowq(rd, rqueue->prio,
		"request reinserted (total on queue=%d)", rqueue->nr_req);

	return 0;
}
Example #4
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;
		if (! blk_fs_request(req)) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			__blk_end_request_all(req, -EIO);
			/* the request is finished; fetch the next one */
			req = blk_fetch_request(q);
			continue;
		}
	    //    printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
	    //        dev - Devices, rq_data_dir(req),
	    //        req->sector, req->current_nr_sectors,
	    //        req->flags);
		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));
		/* end_request(req, 1); */
		if(!__blk_end_request_cur(req, 0)) {
			req = blk_fetch_request(q);
		}
	}
}
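The sbull_transfer() helper called above is not shown in this example. A plausible sketch, modeled on LDD3's sbull (KERNEL_SECTOR_SIZE, dev->size and dev->data are assumed from that driver), is a bounds-checked memcpy whose direction comes straight from rq_data_dir():

static void sbull_transfer(struct sbull_dev *dev, sector_t sector,
		unsigned long nsect, char *buffer, int write)
{
	unsigned long offset = sector * KERNEL_SECTOR_SIZE;
	unsigned long nbytes = nsect * KERNEL_SECTOR_SIZE;

	if ((offset + nbytes) > dev->size) {
		printk (KERN_NOTICE "Beyond-end write (%ld %ld)\n", offset, nbytes);
		return;
	}
	if (write)	/* rq_data_dir(): 0 == READ, non-zero == WRITE */
		memcpy(dev->data + offset, buffer, nbytes);
	else
		memcpy(buffer, dev->data + offset, nbytes);
}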
Example #5
static void htifbd_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req != NULL) {
		struct htifbd_dev *dev;

		dev = req->rq_disk->private_data;
		if (req->cmd_type != REQ_TYPE_FS) {
			pr_notice(DRIVER_NAME ": ignoring non-fs request for %s\n",
				req->rq_disk->disk_name);
			__blk_end_request_all(req, -EIO);
			/* the request is finished; fetch the next one */
			req = blk_fetch_request(q);
			continue;
		}

		htifbd_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
			req->buffer, rq_data_dir(req));
		if (!__blk_end_request_cur(req, 0)) {
			req = blk_fetch_request(q);
		}
	}
}
Example #6
/*
 * Request dispatcher.
 */
static int sd_do_request(struct sd_host *host, struct request *req)
{
	int nr_sectors = 0;
	int error;

	error = sd_check_request(host, req);
	if (error) {
		nr_sectors = error;
		goto out;
	}

	switch (rq_data_dir(req)) {
	case WRITE:
		nr_sectors = sd_write_request(host, req);
		break;
	case READ:
		nr_sectors = sd_read_request(host, req);
		break;
	}

out:
	return nr_sectors;
}
Example #7
/*
   This function does 3 tasks:
   1. check if next expires before req; if so, set the expire time of req to the expire time of next
   2. delete next from the async fifo queue
   3. check if the merged req size >= bundle_size; if so, delete req from the async fifo queue, reinit it and insert it into the bundle queue
 */
static void
flash_merged_requests(struct request_queue *q, struct request *req,
			 struct request *next)
{
	struct flash_data *fd = q->elevator->elevator_data;
	// const int data_type = !rq_is_sync(req);
	// FIXME:
	const int data_type = rq_data_dir(req);

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	// TODO: why need to check if async queue is empty here?
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
			list_move(&req->queuelist, &next->queuelist);
			rq_set_fifo_time(req, rq_fifo_time(next));
		}
	}

	/* delete next */
	rq_fifo_clear(next);
	
	/* task 3 only kick into bundle queue if req is async */
	if(req->__data_len >= fd->bundle_size && data_type == 1)
	{
		/* did both delete and init */
		rq_fifo_clear(req); 
		list_add_tail(&req->queuelist, &fd->bundle_list);
		
		#ifdef DEBUG_FLASH
		printk("req of type %d of size %d is inserted to bundle queue\n", data_type, req->__data_len);
		#endif
	}

}
Example #8
/*
 * osprd_process_request(d, req)
 *   Called when the user reads or writes a sector.
 *   Should perform the read or write, as appropriate.
 */
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
	if (!blk_fs_request(req)) {
		end_request(req, 0);
		return;
	}

	// EXERCISE: Perform the read or write request by copying data between
	// our data array and the request's buffer.
	// Hint: The 'struct request' argument tells you what kind of request
	// this is, and which sectors are being read or written.
	// Read about 'struct request' in <linux/blkdev.h>.
	// Consider the 'req->sector', 'req->current_nr_sectors', and
	// 'req->buffer' members, and the rq_data_dir() function.

	// Your code here.
	if(req->sector+req->current_nr_sectors <= nsectors) {
	
		switch(rq_data_dir(req)) {
			case READ:
				memcpy(req->buffer, d->data+req->sector*SECTOR_SIZE, req->current_nr_sectors*SECTOR_SIZE);
				break;
			case WRITE:
				memcpy(d->data+req->sector*SECTOR_SIZE, req->buffer, req->current_nr_sectors*SECTOR_SIZE);
				break;
			default:
				eprintk("Failed to process request...\n");
				end_request(req, 0);
				return;
		}
	}
	else {
		eprintk("Sector overflow...\n");
		end_request(req, 0);
		return;
	}
	
	end_request(req, 1);
}
Example #9
 //first implement this, and test cases not involved lock can pass
static void osprd_process_request(osprd_info_t *d, struct request *req)
{
	if (!blk_fs_request(req)) {
		end_request(req, 0);
		return;
	}

	// EXERCISE: Perform the read or write request by copying data between
	// our data array and the request's buffer.
	// Hint: The 'struct request' argument tells you what kind of request
	// this is, and which sectors are being read or written.
	// Read about 'struct request' in <linux/blkdev.h>.
	// Consider the 'req->sector', 'req->current_nr_sectors', and
	// 'req->buffer' members, and the rq_data_dir() function.

	// Your code here.
	//determine whether the request is a read or a write
	unsigned int requestType = rq_data_dir(req);
	//compute the offset and set the pointer to the correct region:
	//the beginning address in osprd that we are going to access
	uint8_t *data_ptr = d->data + (req->sector) * SECTOR_SIZE;
	
	if (requestType == READ)
	{
		memcpy((void *)req->buffer, (void *)data_ptr, req->current_nr_sectors * SECTOR_SIZE);
	}
	else if (requestType == WRITE)
	{
		memcpy((void *)data_ptr, (void *)req->buffer, req->current_nr_sectors * SECTOR_SIZE);
	}
	else
	{
		eprintk("Error read/write.\n");
		end_request(req, 0);
		return;
	}
	end_request(req, 1);  //minimum read/write is one sector
}
Example #10
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
		struct bio_vec *bprev;

		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
			return false;
	}

	return true;
}
Example #11
static void idefloppy_create_rw_cmd(ide_drive_t *drive,
				    struct ide_atapi_pc *pc, struct request *rq,
				    unsigned long sector)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	int block = sector / floppy->bs_factor;
	int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
	int cmd = rq_data_dir(rq);

	ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);

	ide_init_pc(pc);
	pc->c[0] = cmd == READ ? GPCMD_READ_10 : GPCMD_WRITE_10;
	put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
	put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);

	memcpy(rq->cmd, pc->c, 12);

	pc->rq = rq;
	if (rq->cmd_flags & REQ_RW)
		pc->flags |= PC_FLAG_WRITING;

	pc->flags |= PC_FLAG_DMA_OK;
}
Example #12
static void lkl_disk_request(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		struct lkl_disk_dev *dev = req->rq_disk->private_data;
		struct lkl_disk_cs cs;

		if (! blk_fs_request(req)) {
			printk (KERN_NOTICE "lkl_disk_request: skip non-fs request\n");
			__blk_end_request(req, -EIO, req->hard_cur_sectors << 9);
			continue;
		}

		cs.linux_cookie=req;
		lkl_disk_do_rw(dev->data, req->sector, req->current_nr_sectors,
			       req->buffer, rq_data_dir(req), &cs);
		/*
		 * Async is broken.
		 */
		BUG_ON (cs.sync == 0);
		blk_end_request(req, cs.error ? -EIO : 0, blk_rq_bytes(req));
	}
}
Example #13
static void pd_next_buf(int unit)
{
	long saved_flags;

	spin_lock_irqsave(&pd_lock, saved_flags);
	end_request(1);
	if (!pd_run) {
		spin_unlock_irqrestore(&pd_lock, saved_flags);
		return;
	}
	
/* paranoia */

	if (QUEUE_EMPTY ||
	    (rq_data_dir(CURRENT) != pd_cmd) ||
	    (minor(CURRENT->rq_dev) != pd_dev) ||
	    (CURRENT->rq_status == RQ_INACTIVE) ||
	    (CURRENT->sector != pd_block)) 
		printk("%s: OUCH: request list changed unexpectedly\n",
			PD.name);

	pd_count = CURRENT->current_nr_sectors;
	pd_buf = CURRENT->buffer;
	spin_unlock_irqrestore(&pd_lock,saved_flags);
}
Example #14
/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q, struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rq_set_fifo_time(rq, jiffies); /* for statistics*/

	if (queue_idling_enabled[rqueue->prio]) {
		if (delayed_work_pending(&rd->read_idle.idle_work))
			(void)cancel_delayed_work(
				&rd->read_idle.idle_work);
		if (time_before(jiffies, rqueue->idle_data.idle_trigger_time)) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else
			rqueue->idle_data.begin_idling = false;

		rqueue->idle_data.idle_trigger_time =
			jiffies + msecs_to_jiffies(rd->read_idle.freq);
	}
	row_log_rowq(rd, rqueue->prio, "added request");
}
Example #15
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		do {
			struct sbull_dev *dev = req->rq_disk->private_data;
			if (req->cmd_type != REQ_TYPE_FS) {
				printk (KERN_NOTICE "Skip non-fs request\n");
				if (!__blk_end_request_cur(req, -1))
					req = NULL;
				continue;
			}
			//    	printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
			//    			dev - Devices, rq_data_dir(req),
			//    			req->sector, req->current_nr_sectors,
			//    			req->flags);
			sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
				       req->buffer, rq_data_dir(req));
			if (!__blk_end_request_cur(req, 0))
				req = NULL;
		} while(req != NULL);
	}
}
Example #16
static void do_z2_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req) {
		unsigned long start = blk_rq_pos(req) << 9;
		unsigned long len  = blk_rq_cur_bytes(req);
		int err = 0;

		if (start + len > z2ram_size) {
			pr_err(DEVICE_NAME ": bad access: block=%llu, "
			       "count=%u\n",
			       (unsigned long long)blk_rq_pos(req),
			       blk_rq_cur_sectors(req));
			err = -EIO;
			goto done;
		}
		while (len) {
			unsigned long addr = start & Z2RAM_CHUNKMASK;
			unsigned long size = Z2RAM_CHUNKSIZE - addr;
			if (len < size)
				size = len;
			addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
			if (rq_data_dir(req) == READ)
				memcpy(req->buffer, (char *)addr, size);
			else
				memcpy((char *)addr, req->buffer, size);
			start += size;
			len -= size;
		}
	done:
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
Example #17
/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics*/

	if (row_queues_def[rqueue->prio].idling_enabled) {
		if (delayed_work_pending(&rd->read_idle.idle_work))
			(void)cancel_delayed_work(
				&rd->read_idle.idle_work);
		if (ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time)) <
				rd->read_idle.freq) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling");
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	if (row_queues_def[rqueue->prio].is_urgent &&
	    row_rowq_unserved(rd, rqueue->prio)) {
		row_log_rowq(rd, rqueue->prio,
			"added urgent request (total on queue=%d)",
			rqueue->nr_req);
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}
Example #18
static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
					     struct request *rq, sector_t block)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	struct ide_cmd cmd;
	struct ide_atapi_pc *pc;

	ide_debug_log(IDE_DBG_FUNC, "enter, cmd: 0x%x\n", rq->cmd[0]);

	if (drive->debug_mask & IDE_DBG_RQ)
		blk_dump_rq_flags(rq, (rq->rq_disk
					? rq->rq_disk->disk_name
					: "dev?"));

	if (rq->errors >= ERROR_MAX) {
		if (drive->failed_pc) {
			ide_floppy_report_error(floppy, drive->failed_pc);
			drive->failed_pc = NULL;
		} else
			printk(KERN_ERR PFX "%s: I/O error\n", drive->name);

		if (blk_special_request(rq)) {
			rq->errors = 0;
			ide_complete_rq(drive, 0, blk_rq_bytes(rq));
			return ide_stopped;
		} else
			goto out_end;
	}
	if (blk_fs_request(rq)) {
		if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
		    (blk_rq_sectors(rq) % floppy->bs_factor)) {
			printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
				drive->name);
			goto out_end;
		}
		pc = &floppy->queued_pc;
		idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
	} else if (blk_special_request(rq) || blk_sense_request(rq)) {
		pc = (struct ide_atapi_pc *)rq->special;
	} else if (blk_pc_request(rq)) {
		pc = &floppy->queued_pc;
		idefloppy_blockpc_cmd(floppy, pc, rq);
	} else
		BUG();

	ide_prep_sense(drive, rq);

	memset(&cmd, 0, sizeof(cmd));

	if (rq_data_dir(rq))
		cmd.tf_flags |= IDE_TFLAG_WRITE;

	cmd.rq = rq;

	if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
		ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
		ide_map_sg(drive, &cmd);
	}

	pc->rq = rq;

	return ide_floppy_issue_pc(drive, &cmd, pc);
out_end:
	drive->failed_pc = NULL;
	if (blk_fs_request(rq) == 0 && rq->errors == 0)
		rq->errors = -EIO;
	ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
	return ide_stopped;
}
Example #19
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	ssize_t ret = 0;
	int writing = 0;
	int at_head = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;

	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}
	if (hdr->flags & SG_FLAG_Q_AT_HEAD)
		at_head = 1;

	ret = -ENOMEM;
	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	if (hdr->cmd_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
		if (!rq->cmd)
			goto out_put_request;
	}

	ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
	if (ret < 0)
		goto out_free_cdb;

	ret = 0;
	if (hdr->iovec_count) {
		struct iov_iter i;
		struct iovec *iov = NULL;

		ret = import_iovec(rq_data_dir(rq),
				   hdr->dxferp, hdr->iovec_count,
				   0, &iov, &i);
		if (ret < 0)
			goto out_free_cdb;

		/* SG_IO howto says that the shorter of the two wins */
		iov_iter_truncate(&i, hdr->dxfer_len);

		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out_free_cdb;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, at_head);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	ret = blk_complete_sghdr_rq(rq, hdr, bio);

out_free_cdb:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
out_put_request:
	blk_put_request(rq);
	return ret;
}
Example #20
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	vbr->req = req;
	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		case REQ_TYPE_DRV_PRIV:
			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
		}
	}

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
	if (num) {
		if (rq_data_dir(vbr->req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}
Example #21
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
{
	struct floppy_state *fs = (struct floppy_state *) dev_id;
	struct swim3 __iomem *sw = fs->swim3;
	int intr, err, n;
	int stat, resid;
	struct dbdma_regs __iomem *dr;
	struct dbdma_cmd *cp;

	intr = in_8(&sw->intr);
	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
	if ((intr & ERROR_INTR) && fs->state != do_transfer)
		printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
		       fs->state, rq_data_dir(fd_req), intr, err);
	switch (fs->state) {
	case locating:
		if (intr & SEEN_SECTOR) {
			out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (sw->ctrack == 0xff) {
				printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
				fs->cur_cyl = -1;
				if (fs->retries > 5) {
					swim3_end_request_cur(-EIO);
					fs->state = idle;
					start_request(fs);
				} else {
					fs->state = jogging;
					act(fs);
				}
				break;
			}
			fs->cur_cyl = sw->ctrack;
			fs->cur_sector = sw->csect;
			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
				printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
				       fs->expect_cyl, fs->cur_cyl);
			fs->state = do_transfer;
			act(fs);
		}
		break;
	case seeking:
	case jogging:
		if (sw->nseek == 0) {
			out_8(&sw->control_bic, DO_SEEK);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (fs->state == seeking)
				++fs->retries;
			fs->state = settling;
			act(fs);
		}
		break;
	case settling:
		out_8(&sw->intr_enable, 0);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		act(fs);
		break;
	case do_transfer:
		if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
			break;
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
		out_8(&sw->select, RELAX);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		dr = fs->dma;
		cp = fs->dma_cmd;
		if (rq_data_dir(fd_req) == WRITE)
			++cp;
		/*
		 * Check that the main data transfer has finished.
		 * On writing, the swim3 sometimes doesn't use
		 * up all the bytes of the postamble, so we can still
		 * see DMA active here.  That doesn't matter as long
		 * as all the sector data has been transferred.
		 */
		if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
			/* wait a little while for DMA to complete */
			for (n = 0; n < 100; ++n) {
				if (cp->xfer_status != 0)
					break;
				udelay(1);
				barrier();
			}
		}
		/* turn off DMA */
		out_le32(&dr->control, (RUN | PAUSE) << 16);
		stat = ld_le16(&cp->xfer_status);
		resid = ld_le16(&cp->res_count);
		if (intr & ERROR_INTR) {
			n = fs->scount - 1 - resid / 512;
			if (n > 0) {
				blk_update_request(fd_req, 0, n << 9);
				fs->req_sector += n;
			}
			if (fs->retries < 5) {
				++fs->retries;
				act(fs);
			} else {
				printk("swim3: error %sing block %ld (err=%x)\n",
				       rq_data_dir(fd_req) == WRITE? "writ": "read",
				       (long)blk_rq_pos(fd_req), err);
				swim3_end_request_cur(-EIO);
				fs->state = idle;
			}
		} else {
			if ((stat & ACTIVE) == 0 || resid != 0) {
				/* musta been an error */
				printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
				printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
				       fs->state, rq_data_dir(fd_req), intr, err);
				swim3_end_request_cur(-EIO);
				fs->state = idle;
				start_request(fs);
				break;
			}
			if (swim3_end_request(0, fs->scount << 9)) {
				fs->req_sector += fs->scount;
				if (fs->req_sector > fs->secpertrack) {
					fs->req_sector -= fs->secpertrack;
					if (++fs->head > 1) {
						fs->head = 0;
						++fs->req_cyl;
					}
				}
				act(fs);
			} else
				fs->state = idle;
		}
		if (fs->state == idle)
			start_request(fs);
		break;
	default:
		printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
	}
	return IRQ_HANDLED;
}
Example #22
	/* We must wait a bit for dbdma to stop */
	for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
		udelay(1);
	out_8(&sw->intr_enable, 0);
	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
	out_8(&sw->select, RELAX);
	if (rq_data_dir(fd_req) == WRITE)
		++cp;
	if (ld_le16(&cp->xfer_status) != 0)
		s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
	else
		s = 0;
	fd_req->sector += s;
	fd_req->current_nr_sectors -= s;
	printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
	       (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
	end_request(fd_req, 0);
	fs->state = idle;
	start_request(fs);
}

static irqreturn_t swim3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct floppy_state *fs = (struct floppy_state *) dev_id;
	struct swim3 __iomem *sw = fs->swim3;
	int intr, err, n;
	int stat, resid;
	struct dbdma_regs __iomem *dr;
	struct dbdma_cmd *cp;

	intr = in_8(&sw->intr);
Example #23
/*
 * row_add_request() - Add request to the scheduler
 * @q:	requests queue
 * @rq:	request to add
 *
 */
static void row_add_request(struct request_queue *q,
			    struct request *rq)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	struct row_queue *rqueue = RQ_ROWQ(rq);
	s64 diff_ms;
	bool queue_was_empty = list_empty(&rqueue->fifo);

	list_add_tail(&rq->queuelist, &rqueue->fifo);
	rd->nr_reqs[rq_data_dir(rq)]++;
	rqueue->nr_req++;
	rq_set_fifo_time(rq, jiffies); /* for statistics*/

	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(1);
		blk_dump_rq_flags(rq, "");
		rq->cmd_flags &= ~REQ_URGENT;
	}

	if (row_queues_def[rqueue->prio].idling_enabled) {
		if (rd->rd_idle_data.idling_queue_idx == rqueue->prio &&
		    hrtimer_active(&rd->rd_idle_data.hr_timer)) {
			if (hrtimer_try_to_cancel(
				&rd->rd_idle_data.hr_timer) >= 0) {
				row_log_rowq(rd, rqueue->prio,
				    "Canceled delayed work on %d",
				    rd->rd_idle_data.idling_queue_idx);
				rd->rd_idle_data.idling_queue_idx =
					ROWQ_MAX_PRIO;
			}
		}
		diff_ms = ktime_to_ms(ktime_sub(ktime_get(),
				rqueue->idle_data.last_insert_time));
		if (unlikely(diff_ms < 0)) {
			pr_err("%s(): time delta error: diff_ms < 0",
				__func__);
			rqueue->idle_data.begin_idling = false;
			return;
		}
		if (diff_ms < rd->rd_idle_data.freq_ms) {
			rqueue->idle_data.begin_idling = true;
			row_log_rowq(rd, rqueue->prio, "Enable idling");
		} else {
			rqueue->idle_data.begin_idling = false;
			row_log_rowq(rd, rqueue->prio, "Disable idling (%ldms)",
				(long)diff_ms);
		}

		rqueue->idle_data.last_insert_time = ktime_get();
	}
	if (row_queues_def[rqueue->prio].is_urgent &&
	    !rd->pending_urgent_rq && !rd->urgent_in_flight) {
		/* Handle High Priority queues */
		if (rqueue->prio < ROWQ_REG_PRIO_IDX &&
		    rd->last_served_ioprio_class != IOPRIO_CLASS_RT &&
		    queue_was_empty) {
			row_log_rowq(rd, rqueue->prio,
				"added (high prio) urgent request");
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		} else if (row_rowq_unserved(rd, rqueue->prio)) {
			/* Handle regular priority queues */
			row_log_rowq(rd, rqueue->prio,
				"added urgent request (total on queue=%d)",
				rqueue->nr_req);
			rq->cmd_flags |= REQ_URGENT;
			rd->pending_urgent_rq = rq;
		}
	} else
		row_log_rowq(rd, rqueue->prio,
			"added request (total on queue=%d)", rqueue->nr_req);
}
Example #24
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct request *req;

#ifdef CONFIG_MMC_PERF_PROFILING
	ktime_t start, diff;
	struct mmc_host *host = mq->card->host;
	unsigned long bytes_xfer;
#endif


	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		req = NULL;	/* Must be set to NULL at each iteration */

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

#ifdef CONFIG_IDLECTRL_EMMC_CUST_SH
		mmc_idle_wake_lock();
#endif /* CONFIG_IDLECTRL_EMMC_CUST_SH */

#ifdef CONFIG_MMC_PERF_PROFILING
		bytes_xfer = blk_rq_bytes(req);
		if (rq_data_dir(req) == READ) {
			start = ktime_get();
			mq->issue_fn(mq, req);
			diff = ktime_sub(ktime_get(), start);
			host->perf.rbytes_mmcq += bytes_xfer;
			host->perf.rtime_mmcq =
				ktime_add(host->perf.rtime_mmcq, diff);
		} else {
			start = ktime_get();
			mq->issue_fn(mq, req);
			diff = ktime_sub(ktime_get(), start);
			host->perf.wbytes_mmcq += bytes_xfer;
			host->perf.wtime_mmcq =
				ktime_add(host->perf.wtime_mmcq, diff);
		}
#else
			mq->issue_fn(mq, req);
#endif

#ifdef CONFIG_IDLECTRL_EMMC_CUST_SH
		mmc_idle_wake_unlock();
#endif /* CONFIG_IDLECTRL_EMMC_CUST_SH */

	} while (1);
	up(&mq->thread_sem);

	return 0;
}
Example #25
/*
 * The driver enables interrupts as much as possible.  In order to do this,
 * (a) the device-interrupt is disabled before entering hd_request(),
 * and (b) the timeout-interrupt is disabled before the sti().
 *
 * Interrupts are still masked (by default) whenever we are exchanging
 * data/cmds with a drive, because some drives seem to have very poor
 * tolerance for latency during I/O. The IDE driver has support to unmask
 * interrupts for non-broken hardware, so use that driver if required.
 */
static void hd_request(void)
{
	unsigned int block, nsect, sec, track, head, cyl;
	struct hd_i_struct *disk;
	struct request *req;

	if (do_hd)
		return;
repeat:
	del_timer(&device_timer);
	local_irq_enable();

	req = CURRENT;
	if (!req) {
		do_hd = NULL;
		return;
	}

	if (reset) {
		local_irq_disable();
		reset_hd();
		return;
	}
	disk = req->rq_disk->private_data;
	block = req->sector;
	nsect = req->nr_sectors;
	if (block >= get_capacity(req->rq_disk) ||
	    ((block+nsect) > get_capacity(req->rq_disk))) {
		printk("%s: bad access: block=%d, count=%d\n",
			req->rq_disk->disk_name, block, nsect);
		end_request(req, 0);
		goto repeat;
	}

	if (disk->special_op) {
		if (do_special_op(disk, req))
			goto repeat;
		return;
	}
	sec   = block % disk->sect + 1;
	track = block / disk->sect;
	head  = track % disk->head;
	cyl   = track / disk->head;
#ifdef DEBUG
	printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n",
		req->rq_disk->disk_name, (req->cmd == READ)?"read":"writ",
		cyl, head, sec, nsect, req->buffer);
#endif
	if (blk_fs_request(req)) {
		switch (rq_data_dir(req)) {
		case READ:
			hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr);
			if (reset)
				goto repeat;
			break;
		case WRITE:
			hd_out(disk,nsect,sec,head,cyl,WIN_WRITE,&write_intr);
			if (reset)
				goto repeat;
			if (wait_DRQ()) {
				bad_rw_intr();
				goto repeat;
			}
			outsw(HD_DATA,req->buffer,256);
			break;
		default:
			printk("unknown hd-command\n");
			end_request(req, 0);
			break;
		}
	}
}
Example #26
/* issue astoria blkdev request (issue_fn) */
static int cyasblkdev_blk_issue_rq(
					struct cyasblkdev_queue *bq,
					struct request *req
					)
{
	struct cyasblkdev_blk_data *bd = bq->data;
	int index = 0;
	int ret = CY_AS_ERROR_SUCCESS;
	uint32_t req_sector = 0;
	uint32_t req_nr_sectors = 0;
	int bus_num = 0;
	int lcl_unit_no = 0;

	DBGPRN_FUNC_NAME;

	/*
	 * will construct a scatterlist for the given request;
	 * the return value is the number of actually used
	 * entries in the resulting list. Then, this scatterlist
	 * can be used for the actual DMA prep operation.
	 */
	spin_lock_irq(&bd->lock);
	index = blk_rq_map_sg(bq->queue, req, bd->sg);

	if (req->rq_disk == bd->user_disk_0) {
		bus_num = bd->user_disk_0_bus_num;
		req_sector = blk_rq_pos(req) + gl_bd->user_disk_0_first_sector;
		req_nr_sectors = blk_rq_sectors(req);
		lcl_unit_no = gl_bd->user_disk_0_unit_no;

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: request made to disk 0 "
			"for sector=%d, num_sectors=%d, unit_no=%d\n",
			__func__, req_sector, (int) blk_rq_sectors(req),
			lcl_unit_no);
		#endif
	} else if (req->rq_disk == bd->user_disk_1) {
		bus_num = bd->user_disk_1_bus_num;
		req_sector = blk_rq_pos(req) + gl_bd->user_disk_1_first_sector;
		/*SECT_NUM_TRANSLATE(blk_rq_sectors(req));*/
		req_nr_sectors = blk_rq_sectors(req);
		lcl_unit_no = gl_bd->user_disk_1_unit_no;

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: request made to disk 1 for "
			"sector=%d, num_sectors=%d, unit_no=%d\n", __func__,
			req_sector, (int) blk_rq_sectors(req), lcl_unit_no);
		#endif
	} else if (req->rq_disk == bd->system_disk) {
		bus_num = bd->system_disk_bus_num;
		req_sector = blk_rq_pos(req) + gl_bd->system_disk_first_sector;
		req_nr_sectors = blk_rq_sectors(req);
		lcl_unit_no = gl_bd->system_disk_unit_no;

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: request made to system disk "
			"for sector=%d, num_sectors=%d, unit_no=%d\n", __func__,
			req_sector, (int) blk_rq_sectors(req), lcl_unit_no);
		#endif
	}
	#ifndef WESTBRIDGE_NDEBUG
	else {
		cy_as_hal_print_message(
			"%s: invalid disk used for request\n", __func__);
	}
	#endif

	spin_unlock_irq(&bd->lock);

	if (rq_data_dir(req) == READ) {
		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: calling readasync() "
			"req_sector=0x%x, req_nr_sectors=0x%x, bd->sg:%x\n\n",
			__func__, req_sector, req_nr_sectors, (uint32_t)bd->sg);
		#endif

		ret = cy_as_storage_read_async(bd->dev_handle, bus_num, 0,
			lcl_unit_no, req_sector, bd->sg, req_nr_sectors,
			(cy_as_storage_callback)cyasblkdev_issuecallback);

		if (ret != CY_AS_ERROR_SUCCESS) {
			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s:readasync() error %d at "
				"address %ld, unit no %d\n", __func__, ret,
				blk_rq_pos(req), lcl_unit_no);
			cy_as_hal_print_message("%s:ending i/o request "
				"on reg:%x\n", __func__, (uint32_t)req);
			#endif

			while (blk_end_request(req,
				(ret == CY_AS_ERROR_SUCCESS),
				req_nr_sectors*512))
				;

			bq->req = NULL;
		}
	} else {
		ret = cy_as_storage_write_async(bd->dev_handle, bus_num, 0,
			lcl_unit_no, req_sector, bd->sg, req_nr_sectors,
			(cy_as_storage_callback)cyasblkdev_issuecallback);

		if (ret != CY_AS_ERROR_SUCCESS) {
			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s: write failed with "
			"error %d at address %ld, unit no %d\n",
			__func__, ret, blk_rq_pos(req), lcl_unit_no);
			#endif

			/*end IO op on this request(does both
			 * end_that_request_... _first & _last) */
			while (blk_end_request(req,
				(ret == CY_AS_ERROR_SUCCESS),
				req_nr_sectors*512))
				;

			bq->req = NULL;
		}
	}

	return ret;
}
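The comment near the top of cyasblkdev_blk_issue_rq() explains that blk_rq_map_sg() builds a scatterlist and returns the number of entries actually used, which can then feed DMA preparation. As a generic sketch (not Westbridge code; every name below is assumed for illustration), a DMA-capable driver would typically map and walk that list, choosing the DMA direction from rq_data_dir():

static void sketch_map_and_walk_sg(struct device *dmadev, struct request *req,
				   struct scatterlist *sgl, int sg_count)
{
	enum dma_data_direction dir = (rq_data_dir(req) == WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE;
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dmadev, sgl, sg_count, dir);
	if (!mapped)
		return;
	for_each_sg(sgl, sg, mapped, i) {
		/* one DMA descriptor per mapped segment */
		pr_debug("seg %d: dma=0x%llx len=%u\n", i,
			 (unsigned long long)sg_dma_address(sg),
			 sg_dma_len(sg));
	}
	dma_unmap_sg(dmadev, sgl, sg_count, dir);
}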
Example #27
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1;

	if (mmc_card_claim_host(card))
		goto flush_queue;

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = req->sector;
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 1 << md->block_bits;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);

#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		if (mmc_card_movinand(card)) {
			if ((brq.data.blocks > 1) || (rq_data_dir(req) == WRITE)) {
				cmd.opcode = MMC_SET_BLOCK_COUNT;
				cmd.arg = req->nr_sectors;
				cmd.flags = MMC_RSP_R1;
				ret = mmc_wait_for_cmd(card->host, &cmd, 2);
			}
			if (rq_data_dir(req) == READ) {
				if (brq.data.blocks > 1) {
					brq.cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
					brq.data.flags |= (MMC_DATA_READ | MMC_DATA_MULTI);
//					brq.mrq.stop = &brq.stop;
				} else {
					brq.cmd.opcode = MMC_READ_SINGLE_BLOCK;
					brq.data.flags |= MMC_DATA_READ;
					brq.mrq.stop = NULL;
				}
			} else {
				brq.cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
				brq.data.flags |= MMC_DATA_WRITE | MMC_DATA_MULTI;
//				brq.mrq.stop = &brq.stop;
			}
		} else {
#endif

		/*
		 * If the host doesn't support multiple block writes, force
		 * block writes to single block. SD cards are excepted from
		 * this rule as they support querying the number of
		 * successfully written sectors.
		 */
		if (rq_data_dir(req) != READ &&
		    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
		    !mmc_card_sd(card))
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			brq.data.flags |= MMC_DATA_MULTI;
			brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		}
#endif

		brq.data.sg = mq->sg;
		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);

		mmc_wait_for_req(card->host, &brq.mrq);
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		if (rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
				/* Work-around for broken cards setting READY_FOR_DATA
				 * when not actually ready.
				 */
				if (mmc_card_movinand(card)) {
					if (R1_CURRENT_STATE(cmd.resp[0]) == 7)
						cmd.resp[0] &= ~R1_READY_FOR_DATA;
				}
#endif
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req, 1);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_card_release_host(card);

	return 1;

 cmd_err:
 	/*
 	 * If this is an SD card and we're writing, we can first
 	 * mark the known good sectors as ok.
 	 *
	 * If the card is not SD, we can still ok written sectors
	 * if the controller can do proper error reporting.
	 *
	 * For reads we just fail the entire chunk as that should
	 * be safe in all cases.
	 */
 	if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
		u32 blocks;
		unsigned int bytes;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			if (card->csd.write_partial)
				bytes = blocks << md->block_bits;
			else
				bytes = blocks << 9;
			spin_lock_irq(&md->lock);
			ret = end_that_request_chunk(req, 1, bytes);
			spin_unlock_irq(&md->lock);
		}
	} else if (rq_data_dir(req) != READ &&
		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

flush_queue:

	mmc_card_release_host(card);

	spin_lock_irq(&md->lock);
	while (ret) {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	}

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req, 0);
	spin_unlock_irq(&md->lock);

	return 0;
}
Example #28
static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}
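deadline_rb_root() uses rq_data_dir(rq) directly as an array index, relying on READ being 0 and WRITE being 1; the row_add_request() examples above do the same with rd->nr_reqs[rq_data_dir(rq)]++. A minimal sketch of that per-direction bookkeeping pattern (the names are illustrative only):

struct dir_stats {
	unsigned long nr_reqs[2];	/* nr_reqs[READ], nr_reqs[WRITE] */
};

static void dir_stats_account(struct dir_stats *st, struct request *rq)
{
	st->nr_reqs[rq_data_dir(rq)]++;	/* 0 == READ, 1 == WRITE */
}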
Example #29
static void ace_fsm_dostate(struct ace_device *ace)
{
	struct request *req;
	u32 status;
	u16 val;
	int count;

#if defined(DEBUG)
	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
		ace->fsm_state, ace->id_req_count);
#endif

	/* Verify that there is actually a CF in the slot. If not, then
	 * bail out back to the idle state and wake up all the waiters */
	status = ace_in32(ace, ACE_STATUS);
	if ((status & ACE_STATUS_CFDETECT) == 0) {
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->media_change = 1;
		set_capacity(ace->gd, 0);
		dev_info(ace->dev, "No CF in slot\n");

		/* Drop all in-flight and pending requests */
		if (ace->req) {
			__blk_end_request_all(ace->req, -EIO);
			ace->req = NULL;
		}
		while ((req = blk_fetch_request(ace->queue)) != NULL)
			__blk_end_request_all(req, -EIO);

		/* Drop back to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = -EIO;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
	}

	switch (ace->fsm_state) {
	case ACE_FSM_STATE_IDLE:
		/* See if there is anything to do */
		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
			ace->fsm_iter_num++;
			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
			mod_timer(&ace->stall_timer, jiffies + HZ);
			if (!timer_pending(&ace->stall_timer))
				add_timer(&ace->stall_timer);
			break;
		}
		del_timer(&ace->stall_timer);
		ace->fsm_continue_flag = 0;
		break;

	case ACE_FSM_STATE_REQ_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Already have the lock, jump to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Request the lock */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
		break;

	case ACE_FSM_STATE_WAIT_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* got the lock; move to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* wait a bit for the lock */
		ace_fsm_yield(ace);
		break;

	case ACE_FSM_STATE_WAIT_CFREADY:
		status = ace_in32(ace, ACE_STATUS);
		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
		    (status & ACE_STATUS_CFBSY)) {
			/* CF card isn't ready; it needs to be polled */
			ace_fsm_yield(ace);
			break;
		}

		/* Device is ready for command; determine what to do next */
		if (ace->id_req_count)
			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
		else
			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
		break;

	case ACE_FSM_STATE_IDENTIFY_PREPARE:
		/* Send identify command */
		ace->fsm_task = ACE_TASK_IDENTIFY;
		ace->data_ptr = ace->cf_id;
		ace->data_count = ACE_BUF_PER_SECTOR;
		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* irq handler takes over from this point; wait for the
		 * transfer to complete */
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
		ace_fsm_yieldirq(ace);
		break;

	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->data_count);
			ace_fsm_yield(ace);
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			ace_fsm_yield(ace);
			break;
		}

		/* Transfer the next buffer */
		ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* transfer finished; kick state machine */
		dev_dbg(ace->dev, "identify finished\n");
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
		break;

	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
		ace_fix_driveid(ace->cf_id);
		ace_dump_mem(ace->cf_id, 512);	/* Debug: Dump out disk ID */

		if (ace->data_result) {
			/* Error occurred, disable the disk */
			ace->media_change = 1;
			set_capacity(ace->gd, 0);
			dev_err(ace->dev, "error fetching CF id (%i)\n",
				ace->data_result);
		} else {
			ace->media_change = 0;

			/* Record disk parameters */
			set_capacity(ace->gd,
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
			dev_info(ace->dev, "capacity: %i sectors\n",
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
		}

		/* We're done, drop to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = ace->data_result;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
		break;

	case ACE_FSM_STATE_REQ_PREPARE:
		req = ace_get_next_request(ace->queue);
		if (!req) {
			ace->fsm_state = ACE_FSM_STATE_IDLE;
			break;
		}
		blk_start_request(req);

		/* Okay, it's a data request, set it up for transfer */
		dev_dbg(ace->dev,
			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
			(unsigned long long)blk_rq_pos(req),
			blk_rq_sectors(req), blk_rq_cur_sectors(req),
			rq_data_dir(req));

		ace->req = req;
		ace->data_ptr = req->buffer;
		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);

		count = blk_rq_sectors(req);
		if (rq_data_dir(req)) {
			/* Kick off write request */
			dev_dbg(ace->dev, "write data\n");
			ace->fsm_task = ACE_TASK_WRITE;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_WRITE_DATA);
		} else {
			/* Kick off read request */
			dev_dbg(ace->dev, "read data\n");
			ace->fsm_task = ACE_TASK_READ;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_READ_DATA);
		}

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* Move to the transfer state.  The systemace will raise
		 * an interrupt once there is something to do
		 */
		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
		if (ace->fsm_task == ACE_TASK_READ)
			ace_fsm_yieldirq(ace);	/* wait for data ready */
		break;

	case ACE_FSM_STATE_REQ_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev,
				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			dev_dbg(ace->dev,
				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer the next buffer */
		if (ace->fsm_task == ACE_TASK_WRITE)
			ace->reg_ops->dataout(ace);
		else
			ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* bio finished; is there another one? */
		if (__blk_end_request_cur(ace->req, 0)) {
			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
			 *      blk_rq_sectors(ace->req),
			 *      blk_rq_cur_sectors(ace->req));
			 */
			ace->data_ptr = ace->req->buffer;
			ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
			ace_fsm_yieldirq(ace);
			break;
		}

		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
		break;

	case ACE_FSM_STATE_REQ_COMPLETE:
		ace->req = NULL;

		/* Finished request; go to idle state */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;

	default:
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;
	}
}
Example #30
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	u32 type;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	else
		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}