Example #1
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

	mq->sg[0].page = virt_to_page(mq->bounce_buf);
	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
	mq->sg[0].length = 0;

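	/*
	 * Collapse all bounce scatterlist entries into the single entry
	 * set up above by accumulating their lengths.
	 */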
	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}
Example #2
/**
* @brief 	Request service function.
* @param 	sd[in]: Card information.
* @param 	req[in]: Request to be processed.
* @return 	SUCCESS/ERROR_ID.
*/ 
static int gp_sdcard_xfer_request(gpSDInfo_t *sd, struct request *req)
{
	int ret = 1;

	while (ret) 
	{
		unsigned int ln;
		
		ln = blk_rq_map_sg(sd->queue, req, sd->sg);
		
		ret = gp_sdcard_transfer_scatter(sd, blk_rq_pos(req), sd->sg, ln, rq_data_dir(req));
		if(ret<0)
			goto out_error;
		/* ----- End of request ----- */
		spin_lock_irq(&sd->lock);
		ret = __blk_end_request(req, 0, ret<<9);
		spin_unlock_irq(&sd->lock);
	}
	return 1;
out_error:
	spin_lock_irq(&sd->lock);
	__blk_end_request_all(req, ret);
	spin_unlock_irq(&sd->lock);
	return 0;
}
Example #3
void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct request *rq = cmd->rq;

	cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
}
Example #4
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}
Example #5
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}
Example #6
static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kzalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}
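bsg_map_buffer() above only builds the scatterlist; a DMA-capable driver would then hand the list to the DMA API, much as Example #18 below does with pci_map_sg(). The following is a minimal sketch of that step, not taken from any of the examples here; the helper name my_prepare_dma and the generic struct device parameter are assumptions.

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: build a scatterlist for req and DMA-map it for dev. */
static int my_prepare_dma(struct device *dev, struct request *req,
			  struct scatterlist *sgl, unsigned int max_ents)
{
	enum dma_data_direction dir =
		rq_data_dir(req) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int nents, mapped;

	sg_init_table(sgl, max_ents);

	/* Number of scatterlist entries actually used for this request. */
	nents = blk_rq_map_sg(req->q, req, sgl);
	if (!nents)
		return -EINVAL;

	/* The IOMMU/DMA layer may merge entries; use the returned count. */
	mapped = dma_map_sg(dev, sgl, nents, dir);
	if (!mapped)
		return -EIO;

	return mapped;
}

On completion the mapping would be released with dma_unmap_sg() using the same direction.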
Example #7
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
static unsigned int card_queue_map_sg(struct card_queue *cq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!cq->bounce_buf)
		return blk_rq_map_sg(cq->queue, cq->req, cq->sg);

	BUG_ON(!cq->bounce_sg);

	sg_len = blk_rq_map_sg(cq->queue, cq->req, cq->bounce_sg);

	cq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(cq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(cq->sg, cq->bounce_buf, buflen);

	return 1;
}
Example #8
int nbdx_rq_map_sg(struct request *rq, struct xio_vmsg *vmsg,
		    unsigned long long *len)
{
	if (unlikely(vmsg->data_tbl.orig_nents < rq->nr_phys_segments)) {
		pr_err("unsupported sg table size\n");
		return -ENOMEM;
	}
	sg_init_table(vmsg->data_tbl.sgl, rq->nr_phys_segments);
	vmsg->data_tbl.nents = blk_rq_map_sg(rq->q, rq, vmsg->data_tbl.sgl);
	if (unlikely(vmsg->data_tbl.nents <= 0)) {
		pr_err("mapped %d sg nents\n", vmsg->data_tbl.nents);
		return -EINVAL;
	}

	*len = blk_rq_bytes(rq);

	return 0;
}
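Example #8 assumes the caller has already sized vmsg->data_tbl to at least rq->nr_phys_segments entries. Below is a minimal sketch of that preparation with the generic sg_table helpers, using the same old-style request API as the examples above; the helper name my_map_rq_to_table is an assumption, not part of the nbdx driver.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate a table large enough for rq, then map it. */
static int my_map_rq_to_table(struct sg_table *tbl, struct request *rq)
{
	int ret;

	/* One entry per physical segment is always enough for blk_rq_map_sg(). */
	ret = sg_alloc_table(tbl, rq->nr_phys_segments, GFP_KERNEL);
	if (ret)
		return ret;

	tbl->nents = blk_rq_map_sg(rq->q, rq, tbl->sgl);
	if (!tbl->nents) {
		sg_free_table(tbl);
		return -EINVAL;
	}

	return 0;
}

GFP_KERNEL is only valid in a context that may sleep; an atomic caller would have to pass GFP_ATOMIC instead.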
Example #9
/* issue astoria blkdev request (issue_fn) */
static int cyasblkdev_blk_issue_rq(
					struct cyasblkdev_queue *bq,
					struct request *req
					)
{
	struct cyasblkdev_blk_data *bd = bq->data;
	int index = 0;
	int ret = CY_AS_ERROR_SUCCESS;
	uint32_t req_sector = 0;
	uint32_t req_nr_sectors = 0;
	int bus_num = 0;
	int lcl_unit_no = 0;

	DBGPRN_FUNC_NAME;

	/*
	 * will construct a scatterlist for the given request;
	 * the return value is the number of actually used
	 * entries in the resulting list. Then, this scatterlist
	 * can be used for the actual DMA prep operation.
	 */
	spin_lock_irq(&bd->lock);
	index = blk_rq_map_sg(bq->queue, req, bd->sg);

	if (req->rq_disk == bd->user_disk_0) {
		bus_num = bd->user_disk_0_bus_num;
		req_sector = blk_rq_pos(req) + gl_bd->user_disk_0_first_sector;
		req_nr_sectors = blk_rq_sectors(req);
		lcl_unit_no = gl_bd->user_disk_0_unit_no;

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: request made to disk 0 "
			"for sector=%d, num_sectors=%d, unit_no=%d\n",
			__func__, req_sector, (int) blk_rq_sectors(req),
			lcl_unit_no);
		#endif
	} else if (req->rq_disk == bd->user_disk_1) {
		bus_num = bd->user_disk_1_bus_num;
		req_sector = blk_rq_pos(req) + gl_bd->user_disk_1_first_sector;
		/*SECT_NUM_TRANSLATE(blk_rq_sectors(req));*/
		req_nr_sectors = blk_rq_sectors(req);
		lcl_unit_no = gl_bd->user_disk_1_unit_no;

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: request made to disk 1 for "
			"sector=%d, num_sectors=%d, unit_no=%d\n", __func__,
			req_sector, (int) blk_rq_sectors(req), lcl_unit_no);
		#endif
	} else if (req->rq_disk == bd->system_disk) {
		bus_num = bd->system_disk_bus_num;
		req_sector = blk_rq_pos(req) + gl_bd->system_disk_first_sector;
		req_nr_sectors = blk_rq_sectors(req);
		lcl_unit_no = gl_bd->system_disk_unit_no;

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: request made to system disk "
			"for sector=%d, num_sectors=%d, unit_no=%d\n", __func__,
			req_sector, (int) blk_rq_sectors(req), lcl_unit_no);
		#endif
	}
	#ifndef WESTBRIDGE_NDEBUG
	else {
		cy_as_hal_print_message(
			"%s: invalid disk used for request\n", __func__);
	}
	#endif

	spin_unlock_irq(&bd->lock);

	if (rq_data_dir(req) == READ) {
		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: calling readasync() "
			"req_sector=0x%x, req_nr_sectors=0x%x, bd->sg:%x\n\n",
			__func__, req_sector, req_nr_sectors, (uint32_t)bd->sg);
		#endif

		ret = cy_as_storage_read_async(bd->dev_handle, bus_num, 0,
			lcl_unit_no, req_sector, bd->sg, req_nr_sectors,
			(cy_as_storage_callback)cyasblkdev_issuecallback);

		if (ret != CY_AS_ERROR_SUCCESS) {
			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s:readasync() error %d at "
				"address %ld, unit no %d\n", __func__, ret,
				blk_rq_pos(req), lcl_unit_no);
			cy_as_hal_print_message("%s:ending i/o request "
				"on reg:%x\n", __func__, (uint32_t)req);
			#endif

			while (blk_end_request(req,
				(ret == CY_AS_ERROR_SUCCESS),
				req_nr_sectors*512))
				;

			bq->req = NULL;
		}
	} else {
		ret = cy_as_storage_write_async(bd->dev_handle, bus_num, 0,
			lcl_unit_no, req_sector, bd->sg, req_nr_sectors,
			(cy_as_storage_callback)cyasblkdev_issuecallback);

		if (ret != CY_AS_ERROR_SUCCESS) {
			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s: write failed with "
			"error %d at address %ld, unit no %d\n",
			__func__, ret, blk_rq_pos(req), lcl_unit_no);
			#endif

			/*end IO op on this request(does both
			 * end_that_request_... _first & _last) */
			while (blk_end_request(req,
				(ret == CY_AS_ERROR_SUCCESS),
				req_nr_sectors*512))
				;

			bq->req = NULL;
		}
	}

	return ret;
}
Example #10
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1;

	if (mmc_card_claim_host(card))
		goto flush_queue;

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = req->sector;
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 1 << md->block_bits;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);

#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		if (mmc_card_movinand(card)) {
			if ((brq.data.blocks > 1) || (rq_data_dir(req) == WRITE)) {
				cmd.opcode = MMC_SET_BLOCK_COUNT;
				cmd.arg = req->nr_sectors;
				cmd.flags = MMC_RSP_R1;
				ret = mmc_wait_for_cmd(card->host, &cmd, 2);
			}
			if (rq_data_dir(req) == READ) {
				if (brq.data.blocks > 1) {
					brq.cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
					brq.data.flags |= (MMC_DATA_READ | MMC_DATA_MULTI);
//					brq.mrq.stop = &brq.stop;
				} else {
					brq.cmd.opcode = MMC_READ_SINGLE_BLOCK;
					brq.data.flags |= MMC_DATA_READ;
					brq.mrq.stop = NULL;
				}
			} else {
				brq.cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
				brq.data.flags |= MMC_DATA_WRITE | MMC_DATA_MULTI;
//				brq.mrq.stop = &brq.stop;
			}
		} else {
#endif

		/*
		 * If the host doesn't support multiple block writes, force
		 * block writes to single block. SD cards are excepted from
		 * this rule as they support querying the number of
		 * successfully written sectors.
		 */
		if (rq_data_dir(req) != READ &&
		    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
		    !mmc_card_sd(card))
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			brq.data.flags |= MMC_DATA_MULTI;
			brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		}
#endif

		brq.data.sg = mq->sg;
		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);

		mmc_wait_for_req(card->host, &brq.mrq);
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		if (rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
				/* Work-around for broken cards setting READY_FOR_DATA
				 * when not actually ready.
				 */
				if (mmc_card_movinand(card)) {
					if (R1_CURRENT_STATE(cmd.resp[0]) == 7)
						cmd.resp[0] &= ~R1_READY_FOR_DATA;
				}
#endif
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req, 1);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_card_release_host(card);

	return 1;

 cmd_err:
 	/*
 	 * If this is an SD card and we're writing, we can first
 	 * mark the known good sectors as ok.
 	 *
	 * If the card is not SD, we can still ok written sectors
	 * if the controller can do proper error reporting.
	 *
	 * For reads we just fail the entire chunk as that should
	 * be safe in all cases.
	 */
 	if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
		u32 blocks;
		unsigned int bytes;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			if (card->csd.write_partial)
				bytes = blocks << md->block_bits;
			else
				bytes = blocks << 9;
			spin_lock_irq(&md->lock);
			ret = end_that_request_chunk(req, 1, bytes);
			spin_unlock_irq(&md->lock);
		}
	} else if (rq_data_dir(req) != READ &&
		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

flush_queue:

	mmc_card_release_host(card);

	spin_lock_irq(&md->lock);
	while (ret) {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	}

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req, 0);
	spin_unlock_irq(&md->lock);

	return 0;
}
Example #11
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	u32 type;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

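	/* Build the scatterlist for the data payload; num is 0 for
	 * requests that carry no data (e.g. a flush). */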
	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	else
		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}
Example #12
/**
* @brief 	Request service function.
* @param 	sd[in]: Card information.
* @param 	req[in]: Request to be processed.
* @return 	SUCCESS/ERROR_ID.
*/
static int gp_sdcard_xfer_request(gpSDInfo_t *sd, struct request *req)
{
	int ret = 1;

	while (ret)
	{
		unsigned int ln;
		unsigned int retry = 0;

		ln = blk_rq_map_sg(sd->queue, req, sd->sg);

#if 0	/* This is used for usb disk check */
		{
			bool do_sync = (rq_is_sync(req) && rq_data_dir(req) == WRITE);
			if (do_sync)
			{
				DEBUG("[Jerry] detect do write sync\n");
			}
		}
#endif
		while(1)
		{
			ret = gp_sdcard_transfer_scatter(sd, blk_rq_pos(req), sd->sg, ln, rq_data_dir(req));
			/* ----- Re-try procedure ----- */
			if(ret<0)
			{
				unsigned int cid[4];
				unsigned int capacity;
				if((retry>=SD_RETRY)||(gp_sdcard_ckinsert(sd)==0)||sd->fremove)
					goto out_error;
				/* ----- Re-initialize sd card ----- */
				memcpy(cid, sd->CID, sizeof(cid));
				capacity = sd->capacity;
				if(gp_sdcard_cardinit(sd)!=0)
				{
					DERROR("[%d]: Re-initialize fail\n",sd->device_id);
					goto out_error;
				}
				else if((cid[0]!=sd->CID[0])||(cid[1]!=sd->CID[1])||(cid[2]!=sd->CID[2])||(cid[3]!=sd->CID[3])||(capacity!=sd->capacity))
				{
					DERROR("[%d]: Different card insert\n",sd->device_id);
					goto out_error;
				}
				retry ++;
			}
			else
				break;
		}
		/* ----- End of request ----- */
		spin_lock_irq(&sd->lock);
		ret = __blk_end_request(req, 0, ret<<9);
		spin_unlock_irq(&sd->lock);
	}
	return 1;
out_error:
	spin_lock_irq(&sd->lock);
	DEBUG("[%d]: txrx fail %d\n", sd->device_id, ret);
	__blk_end_request_all(req, ret);
	spin_unlock_irq(&sd->lock);
	return -ENXIO;
}
Example #13
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret;

	if (mmc_card_claim_host(card))
		goto cmd_err;

	do {
		struct mmc_blk_request brq;
		struct mmc_command cmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = req->sector << 9;
		brq.cmd.flags = MMC_RSP_R1;
		brq.data.timeout_ns = card->csd.tacc_ns * 10;
		brq.data.timeout_clks = card->csd.tacc_clks * 10;
		brq.data.blksz_bits = md->block_bits;
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B;

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = MMC_WRITE_BLOCK;
			brq.cmd.flags = MMC_RSP_R1B;
			brq.data.flags |= MMC_DATA_WRITE;
			brq.data.blocks = 1;
		}
		brq.mrq.stop = brq.data.blocks > 1 ? &brq.stop : NULL;

		brq.data.sg = mq->sg;
		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);

		mmc_wait_for_req(card->host, &brq.mrq);
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		do {
			int err;

			cmd.opcode = MMC_SEND_STATUS;
			cmd.arg = card->rca << 16;
			cmd.flags = MMC_RSP_R1;
			err = mmc_wait_for_cmd(card->host, &cmd, 5);
			if (err) {
				printk(KERN_ERR "%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				goto cmd_err;
			}
		} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
		if (cmd.resp[0] & ~0x00000900)
			printk(KERN_ERR "%s: status = %08x\n",
			       req->rq_disk->disk_name, cmd.resp[0]);
		if (mmc_decode_status(cmd.resp))
			goto cmd_err;
#endif

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_card_release_host(card);

	return 1;

 cmd_err:
	mmc_card_release_host(card);

	/*
	 * This is a little draconian, but until we get proper
	 * error handling sorted out here, its the best we can
	 * do - especially as some hosts have no idea how much
	 * data was transferred before the error occurred.
	 */
	spin_lock_irq(&md->lock);
	do {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	} while (ret);

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req);
	spin_unlock_irq(&md->lock);

	return 0;
}
Example #14
/*
 * Thread for srb
 */
static int srb_thread(void *data)
{
	struct srb_device_s *dev;
	struct request *req;
	unsigned long flags;
	int th_id;
	int th_ret = 0;
	char buff[256];
	struct req_iterator iter;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
	struct bio_vec *bvec;
#else
	struct bio_vec bvec;
#endif
	struct srb_cdmi_desc_s *cdmi_desc;

	SRBDEV_LOG_DEBUG(((struct srb_device_s *)data), "Thread started with device %p", data);

	dev = data;

	/* Init thread specific values */
	spin_lock(&devtab_lock);
	th_id = dev->nb_threads;
	dev->nb_threads++;
	spin_unlock(&devtab_lock);

	set_user_nice(current, -20);
	while (!kthread_should_stop() || !list_empty(&dev->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(dev->waiting_wq,
					kthread_should_stop() ||
					!list_empty(&dev->waiting_queue));

		/* TODO: improve kthread termination, otherwise we cannot
		   terminate a kthread by calling kthread_stop() */
		/* if (kthread_should_stop()) {
			printk(KERN_INFO "srb_thread: immediate kthread exit\n");
			do_exit(0);
		} */

		spin_lock_irqsave(&dev->waiting_lock, flags);
		/* extract request */
		if (list_empty(&dev->waiting_queue)) {
			spin_unlock_irqrestore(&dev->waiting_lock, flags);
			continue;
		}
		req = list_entry(dev->waiting_queue.next, struct request,
				queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irqrestore(&dev->waiting_lock, flags);
		
		if (blk_rq_sectors(req) == 0) {
			blk_end_request_all(req, 0);
			continue;
		}

		req_flags_to_str(req->cmd_flags, buff);
		SRBDEV_LOG_DEBUG(dev, "thread %d: New REQ of type %s (%d) flags: %s (%llu)",
				 th_id, req_code_to_str(rq_data_dir(req)), rq_data_dir(req), buff,
                                 (unsigned long long)req->cmd_flags);
		if (req->cmd_flags & REQ_FLUSH) {
			SRBDEV_LOG_DEBUG(dev, "DEBUG CMD REQ_FLUSH\n");
		}
		/* XXX: Use iterator instead of internal function (cf linux/blkdev.h)
		 *  __rq_for_each_bio(bio, req) {
		 */
		rq_for_each_segment(bvec, req, iter) {
			if (iter.bio->bi_rw & REQ_FLUSH) {
				SRBDEV_LOG_DEBUG(dev, "DEBUG VR BIO REQ_FLUSH\n");
			}
		}

		/* Create scatterlist */
		cdmi_desc = dev->thread_cdmi_desc[th_id];
		sg_init_table(dev->thread_cdmi_desc[th_id]->sgl, DEV_NB_PHYS_SEGS);
		dev->thread_cdmi_desc[th_id]->sgl_size = blk_rq_map_sg(dev->q, req, dev->thread_cdmi_desc[th_id]->sgl);

		SRBDEV_LOG_DEBUG(dev, "scatter_list size %d [nb_seg = %d,"
		                 " sector = %lu, nr_sectors=%u w=%d]",
		                 DEV_NB_PHYS_SEGS,
		                 dev->thread_cdmi_desc[th_id]->sgl_size,
		                 blk_rq_pos(req), blk_rq_sectors(req),
		                 rq_data_dir(req) == WRITE);

		/* Call scatter function */
		th_ret = srb_xfer_scl(dev, dev->thread_cdmi_desc[th_id], req);

		SRBDEV_LOG_DEBUG(dev, "thread %d: REQ done with returned code %d",
		                 th_id, th_ret);
	
		/* No IO error testing for the moment */
		blk_end_request_all(req, 0);
	}

	return 0;
}
Example #15
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
Example #16
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtblk_req *vbr = req->special;
	unsigned long flags;
	unsigned int num;
	const bool last = (req->cmd_flags & REQ_END) != 0;
	int err;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	vbr->req = req;
	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_SPECIAL:
			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
		}
	}

	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
	if (num) {
		if (rq_data_dir(vbr->req) == WRITE)
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
		else
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
	}

	spin_lock_irqsave(&vblk->vq_lock, flags);
	err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vq_lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (last)
		virtqueue_kick(vblk->vq);

	spin_unlock_irqrestore(&vblk->vq_lock, flags);
	return BLK_MQ_RQ_QUEUE_OK;
}
Example #17
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	vbr->req = req;
	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		case REQ_TYPE_DRV_PRIV:
			vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
		}
	}

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
	if (num) {
		if (rq_data_dir(vbr->req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}
Example #18
static int skd_preop_sg_list(struct skd_device *skdev,
			     struct skd_request_context *skreq)
{
	struct request *req = skreq->req;
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
	struct scatterlist *sg = &skreq->sg[0];
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	/* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
		   skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */

	n_sg = blk_rq_map_sg(skdev->queue, req, sg);
	if (n_sg <= 0)
		return -EINVAL;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
	if (n_sg <= 0)
		return -EINVAL;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

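	/* Translate each DMA-mapped scatterlist entry into a FIT sg descriptor. */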
	for (i = 0; i < n_sg; i++) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(&sg[i]);
		uint64_t dma_addr = sg_dma_address(&sg[i]);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
			pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
				 "addr=0x%llx next=0x%llx\n",
				 skdev->name, __func__, __LINE__,
				 i, sgd->byte_count, sgd->control,
				 sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return 0;
}