Code example #1
File: target_core_rd.c  Project: 19Dan01/linux
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	bool need_to_release = false;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

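	/*
	 * Convert the starting LBA into a byte offset within the protection
	 * information area, then split it into a PI page index (prot_page)
	 * and an offset within that page (prot_offset).
	 */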
	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

#ifndef CONFIG_ARCH_HAS_SG_CHAIN

	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
				   PAGE_SIZE);

	/*
	 * Allocate temporarily contiguous scatterlist entries if the prot
	 * pages straddle multiple scatterlist tables.
	 */
	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
		int i;

		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
		if (!prot_sg)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		need_to_release = true;
		sg_init_table(prot_sg, prot_npages);

		for (i = 0; i < prot_npages; i++) {
			if (prot_page + i > prot_table->page_end_offset) {
				prot_table = rd_get_prot_table(dev,
								prot_page + i);
				if (!prot_table) {
					kfree(prot_sg);
					return rc;
				}
				sg_unmark_end(&prot_sg[i - 1]);
			}
			prot_sg[i] = prot_table->sg_table[prot_page + i -
						prot_table->page_start_offset];
		}
	}

#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */

	rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset);
	if (need_to_release)
		kfree(prot_sg);

	return rc;
}
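
In this version of the driver, dif_verify is a function pointer selecting the SBC-layer DIF verification routine for the command's direction. A minimal sketch of the assumed callback typedef and its call sites, inferred from the sbc_dif_verify_write()/sbc_dif_verify_read() calls in example #2 (the exact typedef and call sites in this tree are assumptions, not verbatim kernel code):

/* Assumed callback type, matching the sbc_dif_verify_* signatures used in example #2. */
typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int,
				     unsigned int, struct scatterlist *, int);

/* Sketch of how rd_execute_rw() would dispatch to rd_do_prot_rw(): */
if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
	rc = rd_do_prot_rw(cmd, sbc_dif_verify_write);
	if (rc)
		return rc;
}
/* ... data copy between sgl and the ramdisk pages ... */
if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
	rc = rd_do_prot_rw(cmd, sbc_dif_verify_read);
	if (rc)
		return rc;
}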
Code example #2
File: target_core_rd.c  Project: 383530895/linux
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

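	/*
	 * Convert the starting LBA into a byte offset within the ramdisk
	 * backing store, then split it into a page index (rd_page) and an
	 * offset within that page (rd_offset).
	 */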
	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

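	/*
	 * For writes with T10-PI enabled, look up the PI pages covering this
	 * LBA range and run the DIF write verification before the data is
	 * copied into the ramdisk pages.
	 */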
	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
					  prot_sg, prot_offset);
		if (rc)
			return rc;
	}

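	/*
	 * Copy between the command's scatterlist and the ramdisk backing
	 * pages one fragment at a time, walking the caller's sgl with an
	 * sg mapping iterator and advancing through the rd page tables.
	 */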
	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

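	/*
	 * For reads with T10-PI enabled, look up the PI pages covering this
	 * LBA range and run the DIF read verification on the data just
	 * copied out of the ramdisk.
	 */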
	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
					 prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
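
Both examples map the command's starting LBA to a backing-page index and an intra-page byte offset with the same do_div() idiom. A minimal user-space sketch of that arithmetic (the PAGE_SIZE value, helper name, and sample numbers are illustrative only, not taken from the kernel sources):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/*
 * Mirror of the kernel idiom:
 *   tmp = lba * block_size;
 *   offset = do_div(tmp, PAGE_SIZE);   do_div() divides in place and returns the remainder
 *   page = tmp;                        the quotient is left in tmp
 */
static void lba_to_page(uint64_t lba, uint32_t block_size,
			uint32_t *page, uint32_t *offset)
{
	uint64_t tmp = (uint64_t)lba * block_size;

	*offset = (uint32_t)(tmp % PAGE_SIZE);	/* offset within the backing page */
	*page   = (uint32_t)(tmp / PAGE_SIZE);	/* index of the backing page */
}

int main(void)
{
	uint32_t page, offset;

	/* LBA 10 with 512-byte blocks is byte 5120: page 1, offset 1024. */
	lba_to_page(10, 512, &page, &offset);
	printf("page=%u offset=%u\n", page, offset);
	return 0;
}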