static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}
static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err) {
			cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		} else {
			target_complete_cmd(cmd, SAM_STAT_GOOD);
		}
	}

	bio_put(bio);
}
static sense_reason_t
iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	sector_t lba = cmd->t_task_lba;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	int ret;

	ret = iblock_do_unmap(cmd, bdev, lba, nolb);
	if (ret)
		return ret;

	target_complete_cmd(cmd, GOOD);
	return 0;
}
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}
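/*
 * Hedged sketch (not verbatim from the driver): one plausible shape of the
 * per-bio completion callback that pairs with iblock_complete_cmd() above,
 * assuming the submit path primes ibr->pending with one reference per bio
 * plus its own.  ib_bio_err_cnt is the counter iblock_complete_cmd() checks
 * when the last reference drops.  The function name is illustrative only.
 */
static void example_iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (err) {
		pr_err("IBLOCK: bio %p completed with error: %d\n", bio, err);
		atomic_inc(&ibr->ib_bio_err_cnt);
	}

	bio_put(bio);
	iblock_complete_cmd(cmd);
}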
static int iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
	int ret;

	ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
				   spc_get_write_same_sectors(cmd),
				   GFP_KERNEL, 0);
	if (ret < 0) {
		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
		return ret;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
	struct se_device *dev = se_cmd->se_dev;
	struct xcopy_op *xop;
	unsigned int sa;

	if (!dev->dev_attrib.emulate_3pc) {
		pr_err("EXTENDED_COPY operation explicitly disabled\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	sa = se_cmd->t_task_cdb[1] & 0x1f;
	if (sa != 0x00) {
		pr_err("EXTENDED_COPY(LID4) not supported\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	if (se_cmd->data_length == 0) {
		target_complete_cmd(se_cmd, SAM_STAT_GOOD);
		return TCM_NO_SENSE;
	}
	if (se_cmd->data_length < XCOPY_HDR_LEN) {
		pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
		       se_cmd->data_length, XCOPY_HDR_LEN);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
	if (!xop)
		goto err;
	xop->xop_se_cmd = se_cmd;
	INIT_WORK(&xop->xop_work, target_xcopy_do_work);

	if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work)))
		goto free;

	return TCM_NO_SENSE;

free:
	kfree(xop);
err:
	return TCM_OUT_OF_RESOURCES;
}
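/*
 * Hedged sketch: target_do_xcopy() above defers the copy onto a global
 * xcopy_wq workqueue.  The module is assumed to create it at init time
 * roughly as follows; the setup function name is illustrative only.
 */
static struct workqueue_struct *xcopy_wq;

static int example_xcopy_setup(void)
{
	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
	if (!xcopy_wq)
		return -ENOMEM;
	return 0;
}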
/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static int iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}
static int iblock_execute_unmap(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ibd = dev->dev_ptr;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	int ret = 0;
	int dl, bd_dl;

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		return -EINVAL;
	}

	buf = transport_kmap_data_sg(cmd);

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		ret = -EINVAL;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
			ret = -EINVAL;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
			ret = -EINVAL;
			goto err;
		}

		ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
					   GFP_KERNEL, 0);
		if (ret < 0) {
			pr_err("blkdev_issue_discard() failed: %d\n", ret);
			goto err;
		}

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
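/*
 * Hedged illustration (not part of the driver): how an initiator-built UNMAP
 * parameter list lines up with the parsing in iblock_execute_unmap() above.
 * Per SBC-3, the 8-byte header carries the list length (dl, bytes 0-1) and
 * the block descriptor data length (bd_dl, bytes 2-3); 16-byte descriptors
 * of { 8-byte LBA, 4-byte block count, 4 reserved } follow at offset 8.
 * The helper name is illustrative only.
 */
static void example_fill_single_unmap_descriptor(unsigned char *buf,
						 u64 lba, u32 nr_blocks)
{
	put_unaligned_be16(22, &buf[0]);	 /* dl: bytes following byte 1 */
	put_unaligned_be16(16, &buf[2]);	 /* bd_dl: one 16-byte descriptor */
	put_unaligned_be64(lba, &buf[8]);	 /* UNMAP LOGICAL BLOCK ADDRESS */
	put_unaligned_be32(nr_blocks, &buf[16]); /* NUMBER OF LOGICAL BLOCKS */
}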
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		 dev->rd_dev_id,
		 data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
		 cmd->t_task_lba, rd_size, rd_page, rd_offset);

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
		       data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
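/*
 * Hedged worked example of the page/offset arithmetic at the top of
 * rd_execute_rw(): with a 512-byte block_size and a 4096-byte PAGE_SIZE,
 * t_task_lba = 9 gives a byte offset of 9 * 512 = 4608, so
 * do_div(tmp, PAGE_SIZE) leaves rd_offset = 512 and rd_page = 1;
 * the copy therefore starts 512 bytes into the ramdisk's second backing page.
 */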
static void target_xcopy_do_work(struct work_struct *work)
{
	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	struct se_device *src_dev, *dst_dev;
	sector_t src_lba, dst_lba, end_lba;
	unsigned int max_sectors;
	int rc = 0;
	unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0;

	if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
		goto err_free;

	if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev))
		goto err_free;

	src_dev = xop->src_dev;
	dst_dev = xop->dst_dev;
	src_lba = xop->src_lba;
	dst_lba = xop->dst_lba;
	nolb = xop->nolb;
	end_lba = src_lba + nolb;
	/*
	 * Break up XCOPY I/O into hw_max_sectors-sized I/O based on the
	 * smaller hw_max_sectors of src_dev and dst_dev, capped at
	 * XCOPY_MAX_SECTORS.
	 */
	max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
			  dst_dev->dev_attrib.hw_max_sectors);
	max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);

	max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));

	pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
			nolb, max_nolb, (unsigned long long)end_lba);
	pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
			(unsigned long long)src_lba, (unsigned long long)dst_lba);

	while (src_lba < end_lba) {
		cur_nolb = min(nolb, max_nolb);

		pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
			" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);

		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
		if (rc < 0)
			goto out;

		src_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
				(unsigned long long)src_lba);

		pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
			" cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);

		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
						    dst_lba, cur_nolb);
		if (rc < 0) {
			transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
			goto out;
		}

		dst_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
				(unsigned long long)dst_lba);

		copied_nolb += cur_nolb;
		nolb -= cur_nolb;

		transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
		xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

		transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
	}

	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
		(unsigned long long)src_lba, (unsigned long long)dst_lba);
	pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
		copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);

	pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
	return;

out:
	xcopy_pt_undepend_remotedev(xop);

err_free:
	kfree(xop);
	/*
	 * Don't override an error scsi status if it has already been set
	 */
	if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
		pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
			" CHECK_CONDITION -> sending response\n", rc);
		ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	}
	target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
}
static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
{
	unsigned char *p;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg failed in"
		       " target_rcr_operating_parameters\n");
		return TCM_OUT_OF_RESOURCES;
	}

	if (se_cmd->data_length < 54) {
		pr_err("Receive Copy Results Op Parameters length"
		       " too small: %u\n", se_cmd->data_length);
		transport_kunmap_data_sg(se_cmd);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Set SNLID=1 (Supports no List ID)
	 */
	p[4] = 0x1;
	/*
	 * MAXIMUM TARGET DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
	/*
	 * MAXIMUM SEGMENT DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
	/*
	 * MAXIMUM DESCRIPTOR LIST LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
	/*
	 * MAXIMUM SEGMENT LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
	/*
	 * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
	 */
	put_unaligned_be32(0x0, &p[20]);
	/*
	 * HELD DATA LIMIT
	 */
	put_unaligned_be32(0x0, &p[24]);
	/*
	 * MAXIMUM STREAM DEVICE TRANSFER SIZE
	 */
	put_unaligned_be32(0x0, &p[28]);
	/*
	 * TOTAL CONCURRENT COPIES
	 */
	put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
	/*
	 * MAXIMUM CONCURRENT COPIES
	 */
	p[36] = RCR_OP_MAX_CONCURR_COPIES;
	/*
	 * DATA SEGMENT GRANULARITY (log 2)
	 */
	p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
	/*
	 * INLINE DATA GRANULARITY (log 2)
	 */
	p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
	/*
	 * HELD DATA GRANULARITY
	 */
	p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
	/*
	 * IMPLEMENTED DESCRIPTOR LIST LENGTH
	 */
	p[43] = 0x2;
	/*
	 * List of implemented descriptor type codes (ordered)
	 */
	p[44] = 0x02; /* Copy Block to Block device */
	p[45] = 0xe4; /* Identification descriptor target descriptor */

	/*
	 * AVAILABLE DATA (n-3)
	 */
	put_unaligned_be32(42, &p[0]);

	transport_kunmap_data_sg(se_cmd);
	target_complete_cmd(se_cmd, GOOD);

	return TCM_NO_SENSE;
}