/*
 * ssc_read -- complete a READ task by reporting GOOD status.
 *
 * Marks the per-task private command state as successfully completed
 * (GOOD status, no additional sense), clears the pending-status flag,
 * and sends the final status back through the STMF framework.
 *
 * task - the SCSI task being completed (must be non-NULL).
 * dbuf - the associated data buffer (must be non-NULL; not otherwise
 *        used on this path).
 */
void
ssc_read(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	ssc_command_t *cmd;

	ASSERT(task);
	ASSERT(dbuf);

	cmd = task->task_lu_private;
	VERIFY(cmd);

	/* Record a clean completion on the private command state. */
	cmd->status = SAM4_STATUS_GOOD;
	cmd->saa = 0;
	cmd->flags &= ~SSC_TASK_STATUS_PENDING;

	stmf_scsilib_send_status(task, cmd->status, cmd->saa);
}
/*
 * sbd_ats_do_handling_before_io -- ATS (compare-and-write) overlap check
 * performed before starting an I/O on the extent [lba, lba + count).
 *
 * With HardwareAcceleratedLocking enabled, every read, write, or compare
 * and write is recorded on sl->sl_ats_io_list while in flight.  A new
 * command is admitted (SBD_SUCCESS) unless it overlaps a running compare
 * and write, or is itself a compare and write overlapping any running
 * command; in that case SBD_BUSY is returned and, unless the caller set
 * SBD_ATS_NO_BUSY in flags, the task is rescheduled via
 * stmf_task_poll_lu (or completed with STATUS_BUSY if polling fails).
 *
 * Returns SBD_SUCCESS when the command may proceed (its tracking entry
 * has been added to the list), SBD_BUSY on conflict.
 */
static sbd_status_t
sbd_ats_do_handling_before_io(scsi_task_t *task, struct sbd_lu *sl,
    uint64_t lba, uint64_t count, uint32_t flags)
{
	sbd_status_t ret = SBD_SUCCESS;
	ats_state_t *ats_state, *ats_state_ret;
	sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
	uint8_t cdb0 = task->task_cdb[0];

	if (HardwareAcceleratedLocking == 0)
		return (SBD_SUCCESS);

	mutex_enter(&sl->sl_lock);

	/*
	 * if the list is empty then just add the element to the list and
	 * return success. There is no overlap. This is done for every
	 * read, write or compare and write.
	 */
	if (list_is_empty(&sl->sl_ats_io_list)) {
		goto done;
	}

	/*
	 * There are inflight operations. As a result the list must be scanned
	 * and if there are any overlaps then SBD_BUSY should be returned.
	 *
	 * Duplicate reads and writes are allowed and kept on the list
	 * since there is no reason that overlapping IO operations should
	 * be delayed.
	 *
	 * A command that conflicts with a running compare and write will
	 * be rescheduled and rerun. This is handled by stmf_task_poll_lu.
	 * There is a possibility that a command can be starved and still
	 * return busy, which is valid in the SCSI protocol.
	 */
	for (ats_state = list_head(&sl->sl_ats_io_list); ats_state != NULL;
	    ats_state = list_next(&sl->sl_ats_io_list, ats_state)) {
		if (is_overlapping(ats_state->as_cur_ats_lba,
		    ats_state->as_cur_ats_len, lba, count) == 0)
			continue;

		/* if the task is already listed just return */
		if (task == ats_state->as_cur_ats_task) {
			cmn_err(CE_WARN, "sbd_ats_handling_before_io: "
			    "task %p already on list", (void *) task);
			ret = SBD_SUCCESS;
			goto exit;
		}

		/*
		 * the current command is a compare and write, if there is any
		 * overlap return error
		 */
		if ((cdb0 == SCMD_COMPARE_AND_WRITE) ||
		    (ats_state->as_cmd == SCMD_COMPARE_AND_WRITE)) {
			ret = SBD_BUSY;
			goto exit;
		}
	}

done:
	/* No conflict: track this command on the in-flight list. */
	ats_state_ret =
	    (ats_state_t *)kmem_zalloc(sizeof (ats_state_t), KM_SLEEP);
	ats_state_ret->as_cur_ats_lba = lba;
	ats_state_ret->as_cur_ats_len = count;
	ats_state_ret->as_cmd = cdb0;
	ats_state_ret->as_cur_ats_task = task;
	if (list_is_empty(&sl->sl_ats_io_list)) {
		list_insert_head(&sl->sl_ats_io_list, ats_state_ret);
	} else {
		list_insert_tail(&sl->sl_ats_io_list, ats_state_ret);
	}
	scmd->flags |= SBD_SCSI_CMD_ATS_RELATED;
	/*
	 * Record the command's own tracking entry so it can be found and
	 * freed at release time.  (Previously this stored the loop cursor
	 * `ats_state`, which is NULL after a full scan and *uninitialized*
	 * on the empty-list fast path above -- never the entry just
	 * allocated for this command.)
	 */
	scmd->ats_state = ats_state_ret;
	sbd_list_length++;
	mutex_exit(&sl->sl_lock);

	return (SBD_SUCCESS);

exit:
	mutex_exit(&sl->sl_lock);

	if (ret == SBD_SUCCESS)
		return (SBD_SUCCESS);

	/*
	 * if the command cannot be allowed to be restarted then just
	 * return an error. At the moment only unmap has this property.
	 * Please refer to sbd_handle_unmap_xfer in sbd_scsi.c for full
	 * details.
	 */
	if ((flags & SBD_ATS_NO_BUSY) != 0)
		return (ret);

	/*
	 * at this point the command overlaps a running a compare and write.
	 * It is either a compare and write overlapping a op or an op
	 * overlapping a compare and write. It needs to be delayed. That
	 * means it is not active so if the stmf_task_poll_lu works
	 * turn off active
	 */
	if (stmf_task_poll_lu(task, 10) != STMF_SUCCESS)
		stmf_scsilib_send_status(task, STATUS_BUSY, 0);
	else
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;

	return (ret);
}
/*
 * sbd_handle_ats_xfer_completion -- data-phase completion handler for an
 * ATS (compare and write) transfer buffer.
 *
 * Called once per completed dbuf.  Accumulates the received data into
 * scmd->trans_data, starts further transfers while data is outstanding,
 * and, once all buffers have arrived (nbufs reaches 0 and len is 0),
 * runs the compare-and-write and sends final status.  On transport
 * failure the task is aborted and ATS resources are released.
 *
 * task          - the SCSI task this buffer belongs to.
 * scmd          - per-task private command state (counters, flags,
 *                 staging buffer).
 * dbuf          - the completed data buffer.
 * dbuf_reusable - passed through to sbd_do_ats_xfer when another
 *                 transfer is started with this buffer.
 */
void
sbd_handle_ats_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
    struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
{
	uint64_t laddr;
	uint32_t buflen, iolen, miscompare_off;
	int ndx;
	sbd_status_t ret;

	/* One fewer buffer outstanding; guard against underflow. */
	if (ATOMIC8_GET(scmd->nbufs) > 0) {
		atomic_dec_8(&scmd->nbufs);
	}

	/*
	 * Transport-level failure: tear down ATS tracking and abort the
	 * task.  Resources are released before the abort since the task
	 * may be freed as a result.
	 */
	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		scmd->flags |= SBD_SCSI_CMD_ABORT_REQUESTED;
		sbd_ats_release_resources(task, scmd);
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}

	/* if the command is no longer active return */
	if (((scmd->flags & SBD_SCSI_CMD_ACTIVE) == 0) ||
	    (scmd->trans_data == NULL) ||
	    ((scmd->flags & SBD_SCSI_CMD_TRANS_DATA) == 0) ||
	    (scmd->nbufs == 0xff)) {
		cmn_err(CE_NOTE, "sbd_handle_ats_xfer_completion:handled"
		    "unexpected completion");
		return;
	}

	/* A prior buffer failed: skip data handling, go finish up. */
	if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		goto ATS_XFER_DONE;
	}

	if (ATOMIC32_GET(scmd->len) != 0) {
		/*
		 * Initiate the next port xfer to occur in parallel
		 * with writing this buf. A side effect of sbd_do_ats_xfer is
		 * it may set scmd_len to 0. This means all the data
		 * transfers have been started, not that they are done.
		 */
		sbd_do_ats_xfer(task, scmd, NULL, 0);
	}

	/*
	 * Copy this buffer's scatter/gather segments into the staging
	 * buffer at the buffer's relative offset within the transfer.
	 */
	laddr = dbuf->db_relative_offset;

	for (buflen = 0, ndx = 0; (buflen < dbuf->db_data_size) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(dbuf->db_data_size - buflen,
		    dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		bcopy(dbuf->db_sglist[ndx].seg_addr, &scmd->trans_data[laddr],
		    iolen);
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	task->task_nbytes_transferred += buflen;

ATS_XFER_DONE:
	if (ATOMIC32_GET(scmd->len) == 0 ||
	    scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		/*
		 * if this is not the last buffer to be transfered then exit
		 * and wait for the next buffer. Once nbufs is 0 then all the
		 * data has arrived and the compare can be done.
		 */
		if (ATOMIC8_GET(scmd->nbufs) > 0) {
			return;
		}
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
			/* A transfer failed: report a write error. */
			sbd_ats_release_resources(task, scmd);
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_WRITE_ERROR);
		} else {
			/* All data is in; perform the compare-and-write. */
			ret = sbd_compare_and_write(task, scmd,
			    &miscompare_off);
			/*
			 * since stmf_scsilib_send_status may result in
			 * the task being released clean up resources before
			 * calling it.
			 */
			sbd_ats_release_resources(task, scmd);
			if (ret != SBD_SUCCESS) {
				if (ret != SBD_COMPARE_FAILED) {
					stmf_scsilib_send_status(task,
					    STATUS_CHECK, STMF_SAA_WRITE_ERROR);
				} else {
					/* Compare mismatch: report offset. */
					sbd_send_miscompare_status(task,
					    miscompare_off);
				}
			} else {
				stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			}
		}
		return;
	}
	/* More data expected: reuse this dbuf for the next transfer. */
	sbd_do_ats_xfer(task, scmd, dbuf, dbuf_reusable);
}