int iscsi_readcapacity10_async(struct iscsi_context *iscsi, int lun,
                               iscsi_command_cb cb, int lba, int pmi,
                               void *private_data)
{
    struct scsi_task *task;
    int ret;

    if ((task = scsi_cdb_readcapacity10(lba, pmi)) == NULL) {
        printf("Failed to create readcapacity10 cdb\n");
        return -1;
    }
    ret = iscsi_scsi_command_async(iscsi, lun, task, cb, NULL, private_data);

    return ret;
}
int iscsi_modesense6_async(struct iscsi_context *iscsi, int lun,
                           iscsi_command_cb cb, int dbd, int pc,
                           int page_code, int sub_page_code,
                           unsigned char alloc_len, void *private_data)
{
    struct scsi_task *task;
    int ret;

    if ((task = scsi_cdb_modesense6(dbd, pc, page_code, sub_page_code,
                                    alloc_len)) == NULL) {
        printf("Failed to create modesense6 cdb\n");
        return -2;
    }
    ret = iscsi_scsi_command_async(iscsi, lun, task, cb, NULL, private_data);

    return ret;
}
int iscsi_inquiry_async(struct iscsi_context *iscsi, int lun,
                        iscsi_command_cb cb, int evpd, int page_code,
                        int maxsize, void *private_data)
{
    struct scsi_task *task;
    int ret;

    if ((task = scsi_cdb_inquiry(evpd, page_code, maxsize)) == NULL) {
        printf("Failed to create inquiry cdb\n");
        return -1;
    }
    ret = iscsi_scsi_command_async(iscsi, lun, task, cb, NULL, private_data);

    return ret;
}
int iscsi_testunitready_async(struct iscsi_context *iscsi, int lun,
                              iscsi_command_cb cb, void *private_data)
{
    struct scsi_task *task;
    int ret;

    if ((task = scsi_cdb_testunitready()) == NULL) {
        printf("Failed to create testunitready cdb\n");
        return -1;
    }
    ret = iscsi_scsi_command_async(iscsi, lun, task, cb, NULL, private_data);

    return ret;
}
int iscsi_read10_async(struct iscsi_context *iscsi, int lun,
                       iscsi_command_cb cb, int lba, int datalen,
                       int blocksize, void *private_data)
{
    struct scsi_task *task;
    int ret;

    if (datalen % blocksize != 0) {
        printf("datalen:%d is not a multiple of the blocksize:%d\n",
               datalen, blocksize);
        return -1;
    }

    if ((task = scsi_cdb_read10(lba, datalen, blocksize)) == NULL) {
        printf("Failed to create read10 cdb\n");
        return -2;
    }
    ret = iscsi_scsi_command_async(iscsi, lun, task, cb, NULL, private_data);

    return ret;
}
int iscsi_reportluns_async(struct iscsi_context *iscsi, iscsi_command_cb cb,
                           int report_type, int alloc_len, void *private_data)
{
    struct scsi_task *task;
    int ret;

    if (alloc_len < 16) {
        printf("Minimum allowed alloc len for reportluns is 16. "
               "You specified %d\n", alloc_len);
        return -1;
    }

    if ((task = scsi_reportluns_cdb(report_type, alloc_len)) == NULL) {
        printf("Failed to create reportluns cdb\n");
        return -2;
    }

    /* report luns are always sent to lun 0 */
    ret = iscsi_scsi_command_async(iscsi, 0, task, cb, NULL, private_data);

    return ret;
}
int iscsi_write10_async(struct iscsi_context *iscsi, int lun,
                        iscsi_command_cb cb, unsigned char *data, int datalen,
                        int lba, int fua, int fuanv, int blocksize,
                        void *private_data)
{
    struct scsi_task *task;
    struct iscsi_data outdata;
    int ret;

    if (datalen % blocksize != 0) {
        printf("datalen:%d is not a multiple of the blocksize:%d\n",
               datalen, blocksize);
        return -1;
    }

    if ((task = scsi_cdb_write10(lba, datalen, fua, fuanv, blocksize)) == NULL) {
        printf("Failed to create write10 cdb\n");
        return -2;
    }

    outdata.data = data;
    outdata.size = datalen;

    ret = iscsi_scsi_command_async(iscsi, lun, task, cb, &outdata,
                                   private_data);

    return ret;
}
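All of the wrappers above follow the same shape: build a CDB with the matching scsi_cdb_*() helper, then queue the resulting task through iscsi_scsi_command_async() with a completion callback and optional out-data. As a rough caller-side sketch (not taken from libiscsi itself; my_read10_cb and queue_read10 are made-up names, and the callback signature follows libiscsi's iscsi_command_cb convention):

#include <stdio.h>
#include <iscsi/iscsi.h>
#include <iscsi/scsi-lowlevel.h>

/* Hypothetical completion callback; command_data is the finished scsi_task. */
static void my_read10_cb(struct iscsi_context *iscsi, int status,
                         void *command_data, void *private_data)
{
    struct scsi_task *task = command_data;

    if (status != SCSI_STATUS_GOOD) {
        fprintf(stderr, "READ10 failed: %s\n", iscsi_get_error(iscsi));
    } else {
        /* task->datain.data / task->datain.size hold the returned blocks */
        printf("READ10 returned %d bytes\n", task->datain.size);
    }
    scsi_free_scsi_task(task);
}

/* Hypothetical helper: read one 512-byte block at LBA 0 from the given lun. */
static int queue_read10(struct iscsi_context *iscsi, int lun)
{
    return iscsi_read10_async(iscsi, lun, my_read10_cb, 0, 512, 512, NULL);
}

Queuing the command is not enough on its own: the caller still has to drive the libiscsi event loop (poll the descriptor from iscsi_get_fd() and call iscsi_service()) before the callback fires, which is what the QEMU and iSCSILibWrapper code below arranges via iscsi_set_events() and ServiceISCSIEvents().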
static BlockDriverAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    IscsiLun *iscsilun = bs->opaque;
    struct iscsi_context *iscsi = iscsilun->iscsi;
    struct iscsi_data data;
    IscsiAIOCB *acb;

    assert(req == SG_IO);

    acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);

    acb->iscsilun = iscsilun;
    acb->canceled = 0;
    acb->bh       = NULL;
    acb->status   = -EINPROGRESS;
    acb->buf      = NULL;
    acb->ioh      = buf;

    acb->task = malloc(sizeof(struct scsi_task));
    if (acb->task == NULL) {
        error_report("iSCSI: Failed to allocate task for scsi command. %s",
                     iscsi_get_error(iscsi));
        qemu_aio_release(acb);
        return NULL;
    }
    memset(acb->task, 0, sizeof(struct scsi_task));

    switch (acb->ioh->dxfer_direction) {
    case SG_DXFER_TO_DEV:
        acb->task->xfer_dir = SCSI_XFER_WRITE;
        break;
    case SG_DXFER_FROM_DEV:
        acb->task->xfer_dir = SCSI_XFER_READ;
        break;
    default:
        acb->task->xfer_dir = SCSI_XFER_NONE;
        break;
    }

    acb->task->cdb_size = acb->ioh->cmd_len;
    memcpy(&acb->task->cdb[0], acb->ioh->cmdp, acb->ioh->cmd_len);
    acb->task->expxferlen = acb->ioh->dxfer_len;

    data.size = 0;
    if (acb->task->xfer_dir == SCSI_XFER_WRITE) {
        if (acb->ioh->iovec_count == 0) {
            data.data = acb->ioh->dxferp;
            data.size = acb->ioh->dxfer_len;
        } else {
#if defined(LIBISCSI_FEATURE_IOVECTOR)
            scsi_task_set_iov_out(acb->task,
                                  (struct scsi_iovec *) acb->ioh->dxferp,
                                  acb->ioh->iovec_count);
#else
            struct iovec *iov = (struct iovec *)acb->ioh->dxferp;

            acb->buf = g_malloc(acb->ioh->dxfer_len);
            data.data = acb->buf;
            data.size = iov_to_buf(iov, acb->ioh->iovec_count, 0,
                                   acb->buf, acb->ioh->dxfer_len);
#endif
        }
    }

    if (iscsi_scsi_command_async(iscsi, iscsilun->lun, acb->task,
                                 iscsi_aio_ioctl_cb,
                                 (data.size > 0) ? &data : NULL,
                                 acb) != 0) {
        scsi_free_scsi_task(acb->task);
        qemu_aio_release(acb);
        return NULL;
    }

    /* tell libiscsi to read straight into the buffer we got from ioctl */
    if (acb->task->xfer_dir == SCSI_XFER_READ) {
        if (acb->ioh->iovec_count == 0) {
            scsi_task_add_data_in_buffer(acb->task,
                                         acb->ioh->dxfer_len,
                                         acb->ioh->dxferp);
        } else {
#if defined(LIBISCSI_FEATURE_IOVECTOR)
            scsi_task_set_iov_in(acb->task,
                                 (struct scsi_iovec *) acb->ioh->dxferp,
                                 acb->ioh->iovec_count);
#else
            int i;
            for (i = 0; i < acb->ioh->iovec_count; i++) {
                struct iovec *iov = (struct iovec *)acb->ioh->dxferp;

                scsi_task_add_data_in_buffer(acb->task,
                                             iov[i].iov_len,
                                             iov[i].iov_base);
            }
#endif
        }
    }

    iscsi_set_events(iscsilun);

    return &acb->common;
}
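A caller reaches iscsi_aio_ioctl() through QEMU's bdrv_aio_ioctl() with an SG_IO request and a struct sg_io_hdr describing the CDB and data buffer. The following is only a hedged sketch of such a caller, not QEMU code: send_inquiry is a made-up helper, and the caller is assumed to keep io_hdr, cdb and resp alive until cb runs, because the AIO path above stores pointers to them rather than copying the data.

#include <scsi/sg.h>
#include <string.h>

/* Hypothetical caller sketch: issue a SCSI INQUIRY through the SG_IO
 * pass-through path handled by iscsi_aio_ioctl(). */
static BlockDriverAIOCB *send_inquiry(BlockDriverState *bs,
                                      struct sg_io_hdr *io_hdr,
                                      unsigned char *cdb,      /* 6 bytes */
                                      unsigned char *resp, int resp_len,
                                      BlockDriverCompletionFunc *cb,
                                      void *opaque)
{
    memset(cdb, 0, 6);
    cdb[0] = 0x12;           /* INQUIRY */
    cdb[4] = resp_len;       /* allocation length (<= 255 for a 6-byte CDB) */

    memset(io_hdr, 0, sizeof(*io_hdr));
    io_hdr->interface_id    = 'S';
    io_hdr->dxfer_direction = SG_DXFER_FROM_DEV;
    io_hdr->cmd_len         = 6;
    io_hdr->cmdp            = cdb;
    io_hdr->dxferp          = resp;
    io_hdr->dxfer_len       = resp_len;
    io_hdr->iovec_count     = 0;     /* single flat buffer, no iovec */

    return bdrv_aio_ioctl(bs, SG_IO, io_hdr, cb, opaque);
}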
static int iscsi_aio_readv_acb(IscsiAIOCB *acb)
{
    struct iscsi_context *iscsi = acb->iscsilun->iscsi;
    size_t size;
    uint64_t lba;
    uint32_t num_sectors;
    int ret;
#if !defined(LIBISCSI_FEATURE_IOVECTOR)
    int i;
#endif

    acb->canceled = 0;
    acb->bh       = NULL;
    acb->status   = -EINPROGRESS;
    acb->buf      = NULL;

    size = acb->nb_sectors * BDRV_SECTOR_SIZE;

    acb->task = malloc(sizeof(struct scsi_task));
    if (acb->task == NULL) {
        error_report("iSCSI: Failed to allocate task for scsi READ16 "
                     "command. %s", iscsi_get_error(iscsi));
        return -1;
    }
    memset(acb->task, 0, sizeof(struct scsi_task));

    acb->task->xfer_dir = SCSI_XFER_READ;
    acb->task->expxferlen = size;
    lba = sector_qemu2lun(acb->sector_num, acb->iscsilun);
    num_sectors = sector_qemu2lun(acb->nb_sectors, acb->iscsilun);

    switch (acb->iscsilun->type) {
    case TYPE_DISK:
        acb->task->cdb_size = 16;
        acb->task->cdb[0]  = 0x88;
        *(uint32_t *)&acb->task->cdb[2]  = htonl(lba >> 32);
        *(uint32_t *)&acb->task->cdb[6]  = htonl(lba & 0xffffffff);
        *(uint32_t *)&acb->task->cdb[10] = htonl(num_sectors);
        break;
    default:
        acb->task->cdb_size = 10;
        acb->task->cdb[0]  = 0x28;
        *(uint32_t *)&acb->task->cdb[2] = htonl(lba);
        *(uint16_t *)&acb->task->cdb[7] = htons(num_sectors);
        break;
    }

    ret = iscsi_scsi_command_async(iscsi, acb->iscsilun->lun, acb->task,
                                   iscsi_aio_read16_cb, NULL, acb);
    if (ret != 0) {
        scsi_free_scsi_task(acb->task);
        return -1;
    }

#if defined(LIBISCSI_FEATURE_IOVECTOR)
    scsi_task_set_iov_in(acb->task, (struct scsi_iovec *) acb->qiov->iov,
                         acb->qiov->niov);
#else
    for (i = 0; i < acb->qiov->niov; i++) {
        scsi_task_add_data_in_buffer(acb->task,
                                     acb->qiov->iov[i].iov_len,
                                     acb->qiov->iov[i].iov_base);
    }
#endif
    return 0;
}
static int iscsi_aio_writev_acb(IscsiAIOCB *acb)
{
    struct iscsi_context *iscsi = acb->iscsilun->iscsi;
    size_t size;
    uint32_t num_sectors;
    uint64_t lba;
#if !defined(LIBISCSI_FEATURE_IOVECTOR)
    struct iscsi_data data;
#endif
    int ret;

    acb->canceled = 0;
    acb->bh       = NULL;
    acb->status   = -EINPROGRESS;
    acb->buf      = NULL;

    /* this will allow us to get rid of 'buf' completely */
    size = acb->nb_sectors * BDRV_SECTOR_SIZE;

#if !defined(LIBISCSI_FEATURE_IOVECTOR)
    data.size = MIN(size, acb->qiov->size);

    /* if the iovec only contains one buffer we can pass it directly */
    if (acb->qiov->niov == 1) {
        data.data = acb->qiov->iov[0].iov_base;
    } else {
        acb->buf = g_malloc(data.size);
        qemu_iovec_to_buf(acb->qiov, 0, acb->buf, data.size);
        data.data = acb->buf;
    }
#endif

    acb->task = malloc(sizeof(struct scsi_task));
    if (acb->task == NULL) {
        error_report("iSCSI: Failed to allocate task for scsi WRITE16 "
                     "command. %s", iscsi_get_error(iscsi));
        return -1;
    }
    memset(acb->task, 0, sizeof(struct scsi_task));

    acb->task->xfer_dir = SCSI_XFER_WRITE;
    acb->task->cdb_size = 16;
    acb->task->cdb[0] = 0x8a;
    lba = sector_qemu2lun(acb->sector_num, acb->iscsilun);
    *(uint32_t *)&acb->task->cdb[2]  = htonl(lba >> 32);
    *(uint32_t *)&acb->task->cdb[6]  = htonl(lba & 0xffffffff);
    num_sectors = sector_qemu2lun(acb->nb_sectors, acb->iscsilun);
    *(uint32_t *)&acb->task->cdb[10] = htonl(num_sectors);
    acb->task->expxferlen = size;

#if defined(LIBISCSI_FEATURE_IOVECTOR)
    ret = iscsi_scsi_command_async(iscsi, acb->iscsilun->lun, acb->task,
                                   iscsi_aio_write16_cb, NULL, acb);
#else
    ret = iscsi_scsi_command_async(iscsi, acb->iscsilun->lun, acb->task,
                                   iscsi_aio_write16_cb, &data, acb);
#endif
    if (ret != 0) {
        scsi_free_scsi_task(acb->task);
        g_free(acb->buf);
        return -1;
    }

#if defined(LIBISCSI_FEATURE_IOVECTOR)
    scsi_task_set_iov_out(acb->task, (struct scsi_iovec *) acb->qiov->iov,
                          acb->qiov->niov);
#endif

    return 0;
}
static BlockDriverAIOCB *
iscsi_aio_readv(BlockDriverState *bs, int64_t sector_num,
                QEMUIOVector *qiov, int nb_sectors,
                BlockDriverCompletionFunc *cb,
                void *opaque)
{
    IscsiLun *iscsilun = bs->opaque;
    struct iscsi_context *iscsi = iscsilun->iscsi;
    IscsiAIOCB *acb;
    size_t qemu_read_size;
    int i;
    uint64_t lba;
    uint32_t num_sectors;

    qemu_read_size = BDRV_SECTOR_SIZE * (size_t)nb_sectors;

    acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);
    trace_iscsi_aio_readv(iscsi, sector_num, nb_sectors, opaque, acb);

    acb->iscsilun = iscsilun;
    acb->qiov     = qiov;

    acb->canceled  = 0;
    acb->bh        = NULL;
    acb->status    = -EINPROGRESS;
    acb->read_size = qemu_read_size;
    acb->buf       = NULL;

    /* If the LUN blocksize is bigger than BDRV_SECTOR_SIZE a read from QEMU
     * may be misaligned to the LUN, so we may need to read some extra
     * data.
     */
    acb->read_offset = 0;
    if (iscsilun->block_size > BDRV_SECTOR_SIZE) {
        uint64_t bdrv_offset = BDRV_SECTOR_SIZE * sector_num;
        acb->read_offset = bdrv_offset % iscsilun->block_size;
    }

    num_sectors = (qemu_read_size + iscsilun->block_size
                   + acb->read_offset - 1)
                   / iscsilun->block_size;

    acb->task = malloc(sizeof(struct scsi_task));
    if (acb->task == NULL) {
        error_report("iSCSI: Failed to allocate task for scsi READ16 "
                     "command. %s", iscsi_get_error(iscsi));
        qemu_aio_release(acb);
        return NULL;
    }
    memset(acb->task, 0, sizeof(struct scsi_task));

    acb->task->xfer_dir = SCSI_XFER_READ;
    lba = sector_qemu2lun(sector_num, iscsilun);
    acb->task->expxferlen = qemu_read_size;

    switch (iscsilun->type) {
    case TYPE_DISK:
        acb->task->cdb_size = 16;
        acb->task->cdb[0]  = 0x88;
        *(uint32_t *)&acb->task->cdb[2]  = htonl(lba >> 32);
        *(uint32_t *)&acb->task->cdb[6]  = htonl(lba & 0xffffffff);
        *(uint32_t *)&acb->task->cdb[10] = htonl(num_sectors);
        break;
    default:
        acb->task->cdb_size = 10;
        acb->task->cdb[0]  = 0x28;
        *(uint32_t *)&acb->task->cdb[2] = htonl(lba);
        *(uint16_t *)&acb->task->cdb[7] = htons(num_sectors);
        break;
    }

    if (iscsi_scsi_command_async(iscsi, iscsilun->lun, acb->task,
                                 iscsi_aio_read16_cb, NULL, acb) != 0) {
        scsi_free_scsi_task(acb->task);
        qemu_aio_release(acb);
        return NULL;
    }

    for (i = 0; i < acb->qiov->niov; i++) {
        scsi_task_add_data_in_buffer(acb->task,
                                     acb->qiov->iov[i].iov_len,
                                     acb->qiov->iov[i].iov_base);
    }

    iscsi_set_events(iscsilun);

    return &acb->common;
}
static BlockDriverAIOCB *
iscsi_aio_writev(BlockDriverState *bs, int64_t sector_num,
                 QEMUIOVector *qiov, int nb_sectors,
                 BlockDriverCompletionFunc *cb,
                 void *opaque)
{
    IscsiLun *iscsilun = bs->opaque;
    struct iscsi_context *iscsi = iscsilun->iscsi;
    IscsiAIOCB *acb;
    size_t size;
    uint32_t num_sectors;
    uint64_t lba;
    struct iscsi_data data;

    acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);
    trace_iscsi_aio_writev(iscsi, sector_num, nb_sectors, opaque, acb);

    acb->iscsilun = iscsilun;
    acb->qiov     = qiov;

    acb->canceled = 0;
    acb->bh       = NULL;
    acb->status   = -EINPROGRESS;

    /* XXX we should pass the iovec to write16 to avoid the extra copy */
    /* this will allow us to get rid of 'buf' completely */
    size = nb_sectors * BDRV_SECTOR_SIZE;
    data.size = MIN(size, acb->qiov->size);

    /* if the iovec only contains one buffer we can pass it directly */
    if (acb->qiov->niov == 1) {
        acb->buf = NULL;
        data.data = acb->qiov->iov[0].iov_base;
    } else {
        acb->buf = g_malloc(data.size);
        qemu_iovec_to_buf(acb->qiov, 0, acb->buf, data.size);
        data.data = acb->buf;
    }

    acb->task = malloc(sizeof(struct scsi_task));
    if (acb->task == NULL) {
        error_report("iSCSI: Failed to allocate task for scsi WRITE16 "
                     "command. %s", iscsi_get_error(iscsi));
        qemu_aio_release(acb);
        return NULL;
    }
    memset(acb->task, 0, sizeof(struct scsi_task));

    acb->task->xfer_dir = SCSI_XFER_WRITE;
    acb->task->cdb_size = 16;
    acb->task->cdb[0] = 0x8a;
    lba = sector_qemu2lun(sector_num, iscsilun);
    *(uint32_t *)&acb->task->cdb[2]  = htonl(lba >> 32);
    *(uint32_t *)&acb->task->cdb[6]  = htonl(lba & 0xffffffff);
    num_sectors = size / iscsilun->block_size;
    *(uint32_t *)&acb->task->cdb[10] = htonl(num_sectors);
    acb->task->expxferlen = size;

    if (iscsi_scsi_command_async(iscsi, iscsilun->lun, acb->task,
                                 iscsi_aio_write16_cb, &data, acb) != 0) {
        scsi_free_scsi_task(acb->task);
        g_free(acb->buf);
        qemu_aio_release(acb);
        return NULL;
    }

    iscsi_set_events(iscsilun);

    return &acb->common;
}
static int iscsi_aio_readv_acb(IscsiAIOCB *acb)
{
    struct iscsi_context *iscsi = acb->iscsilun->iscsi;
    uint64_t lba;
    uint32_t num_sectors;
    int ret;
#if !defined(LIBISCSI_FEATURE_IOVECTOR)
    int i;
#endif

    acb->canceled = 0;
    acb->bh       = NULL;
    acb->status   = -EINPROGRESS;
    acb->buf      = NULL;

    /* If the LUN blocksize is bigger than BDRV_SECTOR_SIZE a read from QEMU
     * may be misaligned to the LUN, so we may need to read some extra
     * data.
     */
    acb->read_offset = 0;
    if (acb->iscsilun->block_size > BDRV_SECTOR_SIZE) {
        uint64_t bdrv_offset = BDRV_SECTOR_SIZE * acb->sector_num;
        acb->read_offset = bdrv_offset % acb->iscsilun->block_size;
    }

    num_sectors = (acb->read_size + acb->iscsilun->block_size
                   + acb->read_offset - 1)
                   / acb->iscsilun->block_size;

    acb->task = malloc(sizeof(struct scsi_task));
    if (acb->task == NULL) {
        error_report("iSCSI: Failed to allocate task for scsi READ16 "
                     "command. %s", iscsi_get_error(iscsi));
        return -1;
    }
    memset(acb->task, 0, sizeof(struct scsi_task));

    acb->task->xfer_dir = SCSI_XFER_READ;
    lba = sector_qemu2lun(acb->sector_num, acb->iscsilun);
    acb->task->expxferlen = acb->read_size;

    switch (acb->iscsilun->type) {
    case TYPE_DISK:
        acb->task->cdb_size = 16;
        acb->task->cdb[0]  = 0x88;
        *(uint32_t *)&acb->task->cdb[2]  = htonl(lba >> 32);
        *(uint32_t *)&acb->task->cdb[6]  = htonl(lba & 0xffffffff);
        *(uint32_t *)&acb->task->cdb[10] = htonl(num_sectors);
        break;
    default:
        acb->task->cdb_size = 10;
        acb->task->cdb[0]  = 0x28;
        *(uint32_t *)&acb->task->cdb[2] = htonl(lba);
        *(uint16_t *)&acb->task->cdb[7] = htons(num_sectors);
        break;
    }

    ret = iscsi_scsi_command_async(iscsi, acb->iscsilun->lun, acb->task,
                                   iscsi_aio_read16_cb, NULL, acb);
    if (ret != 0) {
        return -1;
    }

#if defined(LIBISCSI_FEATURE_IOVECTOR)
    scsi_task_set_iov_in(acb->task, (struct scsi_iovec *) acb->qiov->iov,
                         acb->qiov->niov);
#else
    for (i = 0; i < acb->qiov->niov; i++) {
        scsi_task_add_data_in_buffer(acb->task,
                                     acb->qiov->iov[i].iov_len,
                                     acb->qiov->iov[i].iov_base);
    }
#endif
    return 0;
}
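The lba and num_sectors values in the READ/WRITE paths above come from sector_qemu2lun(), which scales QEMU's 512-byte sector units to the LUN's block size. For reference, a minimal sketch of that helper, assuming the usual definition in block/iscsi.c of this period (IscsiLun and BDRV_SECTOR_SIZE come from the surrounding driver):

/* Sketch: convert a QEMU sector count/offset (BDRV_SECTOR_SIZE = 512 bytes)
 * into units of the LUN's block size. */
static int64_t sector_qemu2lun(int64_t sector, IscsiLun *iscsilun)
{
    return sector * BDRV_SECTOR_SIZE / iscsilun->block_size;
}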
/*
 * Execute a SCSI request synchronously
 */
void iSCSILibWrapper::iSCSIExecSCSISync(SCSIRequest &request, unsigned int lun)
{
    // Remove us from the background thread
    iSCSIBackGround::GetInstance().RemoveConnection(*this);

    struct iscsi_data data;
    struct scsi_task *task = request.GetTask();

    if (!mClient.connected || mClient.error) {
        if (mClient.error)
            mErrorString.Format("%s: previous error prevents executing SCSI "
                                "request on target %s: %s",
                                __func__, mTarget.c_str(),
                                iscsi_get_error(mIscsi));
        else
            mErrorString.Format("%s: Executing request on target %s not "
                                "possible without a connection!",
                                __func__, mTarget.c_str());
        mError = true;
        throw CException(mErrorString);
    }

    // You cannot re-execute a request unless you reset it
    if (request.IsExecuted()) {
        EString estr;
        estr.Format("%s: SCSI Request already executed!", __func__);
        mError = true;
        throw CException(estr);
    }

    if (!task) {  // Throw an exception
        EString estr;
        estr.Format("%s: SCSIRequest does not have a task defined", __func__);
        mError = true;
        throw CException(estr);
    }

    switch (task->xfer_dir) {
    default:
    case SCSI_XFER_NONE:
        data.data = NULL;
        data.size = 0;
        break;
    case SCSI_XFER_READ:
        data.data = (unsigned char *)request.GetInBuffer().get();
        data.size = request.GetInBufferSize();
        break;
    case SCSI_XFER_WRITE:
        data.data = (unsigned char *)request.GetOutBuffer().get();
        data.size = request.GetOutBufferSize();
        break;
    }

    task->expxferlen = data.size;

    mClient.finished = 0;
    if (iscsi_scsi_command_async(mIscsi, lun, task, exec_cb, &data, this)) {
        EString estr;
        estr.Format("%s: Error executing SCSI request: %s",
                    __func__, iscsi_get_error(mIscsi));
        mError = true;
        throw CException(estr);
    }

    ServiceISCSIEvents();

    // Add to the background task
    iSCSIBackGround::GetInstance().AddConnection(*this);

    /*
     * Now, transfer any data back ... this might have to change if Ronnie
     * adds support for it in the library ...
     */
    switch (task->xfer_dir) {
    case SCSI_XFER_READ:
        memcpy(request.GetInBuffer().get(), task->datain.data,
               std::min(request.GetInBufferSize(),
                        (unsigned int)task->datain.size));
        break;
    default:
        break;
    }

    request.SetExecuted();
}