/*
 * iblock_execute_rw - map a READ/WRITE se_cmd onto struct bio(s) and submit.
 *
 * Walks the command's scatterlist and packs each S/G entry into bios via
 * bio_add_page(), allocating a new bio whenever the current one cannot
 * accept the full entry, and flushing the accumulated bio list to the block
 * layer whenever IBLOCK_MAX_BIO_PER_TASK bios have been collected.
 *
 * Returns 0 on success, -ENOSYS for an unsupported backend block size, or
 * -ENOMEM on allocation failure.  All error paths set cmd->scsi_sense_reason
 * before returning.
 */
static int iblock_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;	/* S/G entries still to map; hint for bio sizing */
	sector_t block_lba;
	unsigned bio_cnt;	/* bios accumulated on 'list' since last submit */
	int rw;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOSYS;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	/* Stash the request state so bio completion can find it via cmd->priv */
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	/*
	 * Start 'pending' at 2: one count for the first bio plus one extra
	 * held by this function and dropped by the iblock_complete_cmd()
	 * call below, so the command cannot complete while bios are still
	 * being built and submitted.
	 * NOTE(review): assumes iblock_complete_cmd() and the bio completion
	 * path each drop one 'pending' reference -- confirm against those
	 * helpers, which are outside this view.
	 */
	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 * length of the S/G list entry this will cause an
		 * endless loop. Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			/*
			 * Bound the number of bios parked on the local list:
			 * once the per-task limit is hit, hand the batch to
			 * the block layer and start a fresh batch.
			 */
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	/* Drop the extra 'pending' reference taken above */
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	/* Release any bios built but not yet submitted */
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
	return -ENOMEM;
}
/*
 * iblock_execute_write_same - emulate SCSI WRITE SAME by replicating one
 * logical block's worth of payload across the target LBA range.
 *
 * Requires exactly one S/G entry whose length equals the backend block
 * size; the single payload page is added to bios once per sector until
 * sbc_get_write_same_sectors() worth of blocks have been covered, then the
 * whole bio list is submitted as a WRITE.
 *
 * Returns 0 on success, TCM_INVALID_CDB_FIELD for an illegal SGL, or
 * TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE on allocation failure.
 */
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	sector_t sectors = sbc_get_write_same_sectors(cmd);

	sg = &cmd->t_data_sg[0];

	/*
	 * WRITE SAME payload must be a single S/G entry of exactly one
	 * logical block; anything else is an invalid CDB from the initiator.
	 */
	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	/* Stash the request state so bio completion can find it via cmd->priv */
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	/*
	 * One 'pending' count per allocated bio (incremented below for each
	 * extra bio).  Unlike the READ/WRITE path there is no extra reference
	 * held here.
	 * NOTE(review): presumably the final bio completion drops the last
	 * count and completes the command -- confirm against the bio end_io
	 * handler, which is outside this view.
	 */
	atomic_set(&ibr->pending, 1);

	while (sectors) {
		/*
		 * Re-add the same payload page for every sector; when the
		 * current bio is full, start a new one at the current LBA.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	/* Release any bios built but not yet submitted */
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}