Example No. 1
/**
 * @brief This callback method asks the user to destroy the supplied timer.
 *
 * @param[in]  controller This parameter specifies the controller with
 *             which this timer is associated.
 * @param[in]  timer This parameter specifies the timer to be destroyed.
 *
 * @return none
 */
void
scif_cb_timer_destroy(SCI_CONTROLLER_HANDLE_T scif_controller,
                      void *timer_handle)
{
    struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
            sci_object_get_association(scif_controller);

    scif_cb_timer_stop(scif_controller, timer_handle);
    sci_pool_put(isci_controller->timer_pool, (struct ISCI_TIMER *)timer_handle);

    isci_log_message(3, "TIMER", "destroy %p\n", timer_handle);
}
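For context, the matching create callback is presumably the mirror image of this destroy: it draws a free timer from the same pool that isci_controller_construct() fills (see Example No. 2). A minimal sketch, assuming a sci_pool_empty() predicate exists alongside the sci_pool_get()/sci_pool_put() macros shown in these examples, and with hypothetical callback/cookie field names on struct ISCI_TIMER; the SCI_TIMER_CALLBACK_T type name is likewise assumed.

void *
scif_cb_timer_create(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_TIMER_CALLBACK_T callback, void *cookie)
{
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(scif_controller);
	struct ISCI_TIMER *timer = NULL;

	/* Draw a recycled timer from the controller's pool. */
	if (!sci_pool_empty(isci_controller->timer_pool)) {
		sci_pool_get(isci_controller->timer_pool, timer);
		timer->callback = callback;	/* hypothetical fields */
		timer->cookie = cookie;
	}

	isci_log_message(3, "TIMER", "create %p\n", timer);

	return ((void *)timer);
}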
Example No. 2
void isci_controller_construct(struct ISCI_CONTROLLER *controller,
    struct isci_softc *isci)
{
	SCI_CONTROLLER_HANDLE_T scif_controller_handle;

	scif_library_allocate_controller(isci->sci_library_handle,
	    &scif_controller_handle);

	scif_controller_construct(isci->sci_library_handle,
	    scif_controller_handle, NULL);

	controller->isci = isci;
	controller->scif_controller_handle = scif_controller_handle;

	/* This allows us to later use
	 *  sci_object_get_association(scif_controller_handle)
	 * inside of a callback routine to get our struct ISCI_CONTROLLER object
	 */
	sci_object_set_association(scif_controller_handle, (void *)controller);

	controller->is_started = FALSE;
	controller->is_frozen = FALSE;
	controller->release_queued_ccbs = FALSE;
	controller->sim = NULL;
	controller->initial_discovery_mask = 0;

	sci_fast_list_init(&controller->pending_device_reset_list);

	mtx_init(&controller->lock, "isci", NULL, MTX_DEF);

	uint32_t domain_index;

	for(domain_index = 0; domain_index < SCI_MAX_DOMAINS; domain_index++) {
		isci_domain_construct( &controller->domain[domain_index],
		    domain_index, controller);
	}

	controller->timer_memory = malloc(
	    sizeof(struct ISCI_TIMER) * SCI_MAX_TIMERS, M_ISCI,
	    M_NOWAIT | M_ZERO);

	sci_pool_initialize(controller->timer_pool);

	struct ISCI_TIMER *timer = (struct ISCI_TIMER *)
	    controller->timer_memory;

	for ( int i = 0; i < SCI_MAX_TIMERS; i++ ) {
		sci_pool_put(controller->timer_pool, timer++);
	}

	sci_pool_initialize(controller->unmap_buffer_pool);
}
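A construct routine like this implies a matching teardown. A minimal hypothetical sketch of the inverse, assuming the timer array is the only M_ISCI allocation made here (the real driver may have more state to release):

/* Hypothetical inverse of isci_controller_construct(). */
void isci_controller_destruct(struct ISCI_CONTROLLER *controller)
{
	/* Release the backing store that was carved into the timer pool. */
	free(controller->timer_memory, M_ISCI);
	controller->timer_memory = NULL;

	mtx_destroy(&controller->lock);
}
Example No. 3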
/**
 * @brief This method will ensure all internal requests destined for
 *        devices contained in the supplied domain are properly removed
 *        from the high priority request queue.
 *
 * @param[in] fw_hprq This parameter specifies the high priority request
 *            queue object for which to attempt to remove elements.
 * @param[in] fw_domain This parameter specifies the domain for which to
 *            remove all high priority requests.
 *
 * @return none
 */
void scif_sas_high_priority_request_queue_purge_domain(
   SCIF_SAS_HIGH_PRIORITY_REQUEST_QUEUE_T * fw_hprq,
   SCIF_SAS_DOMAIN_T                      * fw_domain
)
{
   SCIF_SAS_IO_REQUEST_T * fw_io;
   POINTER_UINT            io_address;
   U32                     index;
   U32                     element_count;

   SCIF_LOG_TRACE((
      sci_base_object_get_logger(&fw_hprq->lock),
      SCIF_LOG_OBJECT_CONTROLLER | SCIF_LOG_OBJECT_DOMAIN_DISCOVERY,
      "scif_sas_high_priority_request_queue_purge_domain(0x%x,0x%x) enter\n",
      fw_hprq, fw_domain
   ));

   element_count = sci_pool_count(fw_hprq->pool);

   scif_cb_lock_acquire(fw_domain->controller, &fw_hprq->lock);

   for (index = 0; index < element_count; index++)
   {
      sci_pool_get(fw_hprq->pool, io_address);

      fw_io = (SCIF_SAS_IO_REQUEST_T*) io_address;

      // If the high priority request is not intended for this domain,
      // then it can be left in the pool.
      if (fw_io->parent.device->domain != fw_domain)
      {
         sci_pool_put(fw_hprq->pool, io_address);
      }
      else
      {
         if (fw_io->parent.is_internal)
         {
            SCIF_SAS_INTERNAL_IO_REQUEST_T * fw_internal_io =
               (SCIF_SAS_INTERNAL_IO_REQUEST_T *)fw_io;

            // The request was intended for a device in the domain.  Put it
            // back in the pool of freely available internal request memory
            // objects.  The internal IO's timer is destroyed as well.
            scif_sas_internal_io_request_destruct(fw_domain->controller, fw_internal_io);
         }
      }
   }

   scif_cb_lock_release(fw_domain->controller, &fw_hprq->lock);
}
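The loop above relies on a pool-rotation idiom: snapshot the element count, then get/put exactly that many times, so entries that do not match cycle back into the pool in their original order while matching ones are removed. The same idiom in isolation, as a self-contained toy program with a small integer FIFO standing in for sci_pool:

#include <stdio.h>

/* Tiny FIFO standing in for sci_pool: rotate it once, dropping odd
 * values, exactly the way purge_domain drops requests for the purged
 * domain while putting back everything else. */
#define CAP 8
static int q[CAP], head, tail, count;

static void put(int v) { q[tail] = v; tail = (tail + 1) % CAP; count++; }
static int  get(void)  { int v = q[head]; head = (head + 1) % CAP; count--; return v; }

int main(void)
{
	for (int v = 1; v <= 5; v++)
		put(v);

	int n = count;             /* snapshot, like sci_pool_count() */
	for (int i = 0; i < n; i++) {
		int v = get();
		if (v % 2 == 0)    /* "not our domain": keep it */
			put(v);
		/* odd values are purged (destructed in the real code) */
	}

	while (count > 0)
		printf("%d ", get());  /* prints: 2 4 */
	printf("\n");
	return 0;
}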
Example No. 4
/**
 * @brief This method will be invoked to free dynamically allocated memory.
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which memory was previously allocated.
 * @param[in]  mde This parameter represents the memory descriptor that
 *             references the memory to be freed.
 *
 * @return none
 */
void scif_cb_controller_free_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T * mde)
{
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	/*
	 * Put the buffer back into the controller's buffer pool, rather
	 * than invoking contigfree().  This helps reduce the chance that
	 * we won't have buffers available when the system is under memory
	 * pressure.
	 */
	sci_pool_put(isci_controller->unmap_buffer_pool,
	    mde->virtual_address);
}
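The allocate-side callback is presumably the mirror of this one: try the unmap_buffer_pool first and fall back to a fresh contiguous allocation only when the pool is empty. A hedged sketch, assuming a sci_pool_empty()/sci_pool_get() pair symmetric to the calls shown in these examples, with the contigmalloc(9) size and alignment chosen purely for illustration:

void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T * mde)
{
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	/* Reuse a pooled buffer when one is available; otherwise fall back
	 * to contigmalloc(9).  Fields beyond virtual_address, and the size
	 * and alignment used here, are assumptions. */
	if (!sci_pool_empty(isci_controller->unmap_buffer_pool))
		sci_pool_get(isci_controller->unmap_buffer_pool,
		    mde->virtual_address);
	else
		mde->virtual_address = contigmalloc(PAGE_SIZE, M_ISCI,
		    M_NOWAIT, 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
}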
Example No. 5
/**
 * @brief This routine frees the memory for a completed internal request.
 *
 * @param[in] fw_controller The handle to the framework controller.
 * @param[in] fw_internal_request_buffer The internal request buffer to be
 *            freed.
 *
 * @return none
 */
void scif_sas_controller_free_internal_request(
   SCIF_SAS_CONTROLLER_T * fw_controller,
   void                  * fw_internal_request_buffer
)
{
   SCIF_LOG_TRACE((
      sci_base_object_get_logger(fw_controller),
      SCIF_LOG_OBJECT_CONTROLLER | SCIF_LOG_OBJECT_IO_REQUEST,
      "scif_controller_free_internal_request(0x%x, 0x%x) enter\n",
      fw_controller, fw_internal_request_buffer
   ));

   // Return the memory to the pool.
   if( !sci_pool_full(fw_controller->internal_request_memory_pool) )
   {
      sci_pool_put(
         fw_controller->internal_request_memory_pool,
         (POINTER_UINT) fw_internal_request_buffer
      );
   }
}
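The get-side counterpart would pop an address off the same pool. A hedged sketch, assuming a sci_pool_empty() predicate symmetric to the sci_pool_full() used above, and that the pool stores POINTER_UINT addresses exactly as the put does:

void * scif_sas_controller_allocate_internal_request(
   SCIF_SAS_CONTROLLER_T * fw_controller
)
{
   POINTER_UINT buffer_address = 0;

   // Pop a free internal request buffer, if any remain in the pool.
   if ( !sci_pool_empty(fw_controller->internal_request_memory_pool) )
   {
      sci_pool_get(
         fw_controller->internal_request_memory_pool,
         buffer_address
      );
   }

   return (void *)buffer_address;
}
Example No. 6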
/**
 * @brief This method constructs an internal SMP request.
 *
 * @param[in] fw_controller The framework controller.
 * @param[in] fw_device The SMP device targeted by the internal IO.
 * @param[in] internal_io_memory The memory space for the internal IO.
 * @param[in] io_tag The IO tag for the internal IO to be constructed.
 * @param[in] smp_command A pointer to the SMP request data structure, as
 *       defined by the SAS protocol.
 *
 * @return Indicate if the internal IO was successfully constructed.
 * @retval SCI_SUCCESS This value is returned if the internal IO was
 *         successfully constructed.
 * @retval SCI_FAILURE This value is returned if the internal IO could not
 *         be constructed.
 */
SCI_STATUS scif_sas_internal_io_request_construct_smp(
   SCIF_SAS_CONTROLLER_T       * fw_controller,
   SCIF_SAS_REMOTE_DEVICE_T    * fw_device,
   void                        * internal_io_memory,
   U16                           io_tag,
   SMP_REQUEST_T               * smp_command
)
{
   SCIF_SAS_INTERNAL_IO_REQUEST_T * fw_internal_io  =
     (SCIF_SAS_INTERNAL_IO_REQUEST_T*)internal_io_memory;

   SCIF_SAS_IO_REQUEST_T * fw_io =
     (SCIF_SAS_IO_REQUEST_T*)internal_io_memory;

   SCI_STATUS status;

   // Call the common SMP request construct routine.
   status = scif_sas_io_request_construct_smp(
               fw_controller,
               fw_device,
               internal_io_memory,
               (char *)internal_io_memory + sizeof(SCIF_SAS_INTERNAL_IO_REQUEST_T),
               SCI_CONTROLLER_INVALID_IO_TAG,
               smp_command,
               NULL //there is no associated user io object.
            );

   // The code below applies only to internal IOs.
   if (status == SCI_SUCCESS)
   {
      //set the is_internal flag
      fw_io->parent.is_internal = TRUE;

      if (fw_internal_io->internal_io_timer == NULL)
      {
         //create the timer for this internal request.
         fw_internal_io->internal_io_timer =
            scif_cb_timer_create(
               (SCI_CONTROLLER_HANDLE_T *)fw_controller,
               scif_sas_internal_io_request_timeout_handler,
               (void*)fw_io
            );
      }
      else
      {
         ASSERT (0);
      }

      //insert into high priority queue
      if ( !sci_pool_full(fw_controller->hprq.pool) )
      {
         sci_pool_put(
            fw_controller->hprq.pool, (POINTER_UINT) internal_io_memory
         );
      }
      else
      {
         SCIF_LOG_ERROR((
            sci_base_object_get_logger(fw_controller),
            SCIF_LOG_OBJECT_CONTROLLER | SCIF_LOG_OBJECT_REMOTE_DEVICE,
            "scif_sas_internal_io_request_construct_smp, high priority queue full!\n"
         ));

         scif_sas_internal_io_request_destruct(fw_controller, fw_internal_io);

         //return failure status.
         return SCI_FAILURE_INSUFFICIENT_RESOURCES;
      }
   }

   return status;
}
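A hypothetical caller ties the two previous routines together: take a buffer from the internal request pool, construct the SMP IO into it, and recycle the buffer if construction fails. The wrapper name and the allocate helper (sketched under Example No. 5) are assumptions; whether the queue-full path recycles the buffer itself is inferred from the destruct call above.

SCI_STATUS scif_sas_send_internal_smp_io(
   SCIF_SAS_CONTROLLER_T    * fw_controller,
   SCIF_SAS_REMOTE_DEVICE_T * fw_device,
   SMP_REQUEST_T            * smp_command
)
{
   SCI_STATUS status;
   void * internal_io_memory =
      scif_sas_controller_allocate_internal_request(fw_controller);

   if (internal_io_memory == NULL)
      return SCI_FAILURE_INSUFFICIENT_RESOURCES;

   status = scif_sas_internal_io_request_construct_smp(
               fw_controller, fw_device, internal_io_memory,
               SCI_CONTROLLER_INVALID_IO_TAG, smp_command
            );

   // On the queue-full path above the request destructs itself; any
   // other failure leaves the buffer with the caller, so recycle it.
   if (   status != SCI_SUCCESS
       && status != SCI_FAILURE_INSUFFICIENT_RESOURCES)
      scif_sas_controller_free_internal_request(
         fw_controller, internal_io_memory);

   return status;
}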
Example No. 7
int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
{
	int error;
	device_t device = controller->isci->device;
	uint32_t max_segment_size = isci_io_request_get_max_io_size();
	uint32_t status = 0;
	struct ISCI_MEMORY *uncached_controller_memory =
	    &controller->uncached_controller_memory;
	struct ISCI_MEMORY *cached_controller_memory =
	    &controller->cached_controller_memory;
	struct ISCI_MEMORY *request_memory =
	    &controller->request_memory;
	POINTER_UINT virtual_address;
	bus_addr_t physical_address;

	controller->mdl = sci_controller_get_memory_descriptor_list_handle(
	    controller->scif_controller_handle);

	uncached_controller_memory->size = sci_mdl_decorator_get_memory_size(
	    controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS);

	error = isci_allocate_dma_buffer(device, uncached_controller_memory);

	if (error != 0)
	    return (error);

	sci_mdl_decorator_assign_memory( controller->mdl,
	    SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
	    uncached_controller_memory->virtual_address,
	    uncached_controller_memory->physical_address);

	cached_controller_memory->size = sci_mdl_decorator_get_memory_size(
	    controller->mdl,
	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
	);

	error = isci_allocate_dma_buffer(device, cached_controller_memory);

	if (error != 0)
	    return (error);

	sci_mdl_decorator_assign_memory(controller->mdl,
	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
	    cached_controller_memory->virtual_address,
	    cached_controller_memory->physical_address);

	request_memory->size =
	    controller->queue_depth * isci_io_request_get_object_size();

	error = isci_allocate_dma_buffer(device, request_memory);

	if (error != 0)
	    return (error);

	/* For STP PIO testing, we want to ensure we can force multiple SGLs
	 *  since this has been a problem area in SCIL.  This tunable parameter
	 *  will allow us to force DMA segments to a smaller size, ensuring
	 *  that even if a physically contiguous buffer is attached to this
	 *  I/O, the DMA subsystem will pass us multiple segments in our DMA
	 *  load callback.
	 */
	TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size);
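	/* For example, a 4 KB segment limit can be forced from the loader
	 * prompt or /boot/loader.conf (the value is purely illustrative):
	 *
	 *   hw.isci.max_segment_size="4096"
	 */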

	/* Create DMA tag for our I/O requests.  Then we can create DMA maps based off
	 *  of this tag and store them in each of our ISCI_IO_REQUEST objects.  This
	 *  will enable better performance than creating the DMA maps every
	 *  time we get an I/O.
	 */
	status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    isci_io_request_get_max_io_size(),
	    SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL,
	    &controller->buffer_dma_tag);

	if (status != 0)
	    return (status);

	sci_pool_initialize(controller->request_pool);

	virtual_address = request_memory->virtual_address;
	physical_address = request_memory->physical_address;

	for (int i = 0; i < controller->queue_depth; i++) {
		struct ISCI_REQUEST *request =
		    (struct ISCI_REQUEST *)virtual_address;

		isci_request_construct(request,
		    controller->scif_controller_handle,
		    controller->buffer_dma_tag, physical_address);

		sci_pool_put(controller->request_pool, request);

		virtual_address += isci_request_get_object_size();
		physical_address += isci_request_get_object_size();
	}

	uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) +
	    scif_remote_device_get_object_size();

	controller->remote_device_memory = (uint8_t *) malloc(
	    remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI,
	    M_NOWAIT | M_ZERO);

	sci_pool_initialize(controller->remote_device_pool);

	uint8_t *remote_device_memory_ptr = controller->remote_device_memory;

	for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct ISCI_REMOTE_DEVICE *remote_device =
		    (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr;

		controller->remote_device[i] = NULL;
		remote_device->index = i;
		remote_device->is_resetting = FALSE;
		remote_device->frozen_lun_mask = 0;
		sci_fast_list_element_init(remote_device,
		    &remote_device->pending_device_reset_element);
		sci_pool_put(controller->remote_device_pool, remote_device);
		remote_device_memory_ptr += remote_device_size;
	}

	return (0);
}
Example No. 8
void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller;
	struct ISCI_REMOTE_DEVICE *isci_remote_device;
	union ccb *ccb;
	BOOL complete_ccb;

	complete_ccb = TRUE;
	isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller);
	isci_remote_device =
		(struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device);

	ccb = isci_request->ccb;

	ccb->ccb_h.status &= ~CAM_STATUS_MASK;

	switch (completion_status) {
	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
#if __FreeBSD_version >= 900026
		if (ccb->ccb_h.func_code == XPT_SMP_IO) {
			void *smp_response =
			    scif_io_request_get_response_iu_address(
			        isci_request->sci_object);

			memcpy(ccb->smpio.smp_response, smp_response,
			    ccb->smpio.smp_response_len);
		}
#endif
		ccb->ccb_h.status |= CAM_REQ_CMP;
		break;

	case SCI_IO_SUCCESS_IO_DONE_EARLY:
		ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->csio.resid = ccb->csio.dxfer_len -
		    scif_io_request_get_number_of_bytes_transferred(
		        isci_request->sci_object);
		break;

	case SCI_IO_FAILURE_RESPONSE_VALID:
	{
		SCI_SSP_RESPONSE_IU_T * response_buffer;
		uint32_t sense_length;
		int error_code, sense_key, asc, ascq;
		struct ccb_scsiio *csio = &ccb->csio;

		response_buffer = (SCI_SSP_RESPONSE_IU_T *)
		    scif_io_request_get_response_iu_address(
		        isci_request->sci_object);

		sense_length = sci_ssp_get_sense_data_length(
		    response_buffer->sense_data_length);

		sense_length = MIN(csio->sense_len, sense_length);

		memcpy(&csio->sense_data, response_buffer->data, sense_length);

		csio->sense_resid = csio->sense_len - sense_length;
		csio->scsi_status = response_buffer->status;
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		scsi_extract_sense( &csio->sense_data, &error_code, &sense_key,
		    &asc, &ascq );
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
		    csio->scsi_status, sense_key, asc, ascq);
		break;
	}

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		isci_remote_device_reset(isci_remote_device, NULL);

		/* drop through */
	case SCI_IO_FAILURE_TERMINATED:
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
		break;

	case SCI_IO_FAILURE_INVALID_STATE:
	case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
		complete_ccb = FALSE;
		break;

	case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;

	case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
		{
			struct ccb_relsim ccb_relsim;
			struct cam_path *path;

			xpt_create_path(&path, NULL,
			    cam_sim_path(isci_controller->sim),
			    isci_remote_device->index, 0);

			xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
			ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
			ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
			ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
			ccb_relsim.openings =
			    scif_remote_device_get_max_queue_depth(remote_device);
			xpt_action((union ccb *)&ccb_relsim);
			xpt_free_path(path);
			complete_ccb = FALSE;
		}
		break;

	case SCI_IO_FAILURE:
	case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
	case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
	case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
	case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
	default:
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0],
		    completion_status);
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		break;
	}

	callout_stop(&isci_request->parent.timer);
	bus_dmamap_sync(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map);

	isci_request->ccb = NULL;

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_request);

	if (complete_ccb) {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			/* ccb will be completed with some type of non-success
			 *  status.  So temporarily freeze the queue until the
			 *  upper layers can act on the status.  The
			 *  CAM_DEV_QFRZN flag will then release the queue
			 *  after the status is acted upon.
			 */
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
		}

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			TAILQ_REMOVE(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

			/*
			 * This CCB that was in the queue was completed, so
			 *  set the in_progress pointer to NULL denoting that
			 *  we can retry another CCB from the queue.  We only
			 *  allow one CCB at a time from the queue to be
			 *  in progress so that we can effectively maintain
			 *  ordering.
			 */
			isci_remote_device->queued_ccb_in_progress = NULL;
		}

		if (isci_remote_device->frozen_lun_mask != 0) {
			isci_remote_device_release_device_queue(isci_remote_device);
		}

		xpt_done(ccb);

		if (isci_controller->is_frozen == TRUE) {
			isci_controller->is_frozen = FALSE;
			xpt_release_simq(isci_controller->sim, TRUE);
		}
	} else {
		isci_remote_device_freeze_lun_queue(isci_remote_device,
		    ccb->ccb_h.target_lun);

		if (ccb->ccb_h.status & CAM_SIM_QUEUED) {

			KASSERT(ccb == isci_remote_device->queued_ccb_in_progress,
			    ("multiple internally queued ccbs in flight"));

			/*
			 *  Do nothing, CCB is already on the device's queue.
			 *   We leave it on the queue, to be retried again
			 *   next time a CCB on this device completes, or we
			 *   get a ready notification for this device.
			 */
			isci_log_message(1, "ISCI", "already queued %p %x\n",
			    ccb, ccb->csio.cdb_io.cdb_bytes[0]);

			isci_remote_device->queued_ccb_in_progress = NULL;

		} else {
			isci_log_message(1, "ISCI", "queue %p %x\n", ccb,
			    ccb->csio.cdb_io.cdb_bytes[0]);
			ccb->ccb_h.status |= CAM_SIM_QUEUED;

			TAILQ_INSERT_TAIL(&isci_remote_device->queued_ccbs,
			    &ccb->ccb_h, sim_links.tqe);
		}
	}
}
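When complete_ccb is FALSE the CCB is parked on queued_ccbs, so something must later restart it. A hedged sketch of that retry side, using the fields shown above; the dispatch helper is hypothetical and named only for illustration.

static void
isci_remote_device_retry_queued_ccb(struct ISCI_REMOTE_DEVICE *remote_device)
{
	struct ccb_hdr *ccb_h;

	/* Only one parked CCB may be in flight at a time; this preserves
	 * the original submission order. */
	if (remote_device->queued_ccb_in_progress != NULL)
		return;

	ccb_h = TAILQ_FIRST(&remote_device->queued_ccbs);
	if (ccb_h == NULL)
		return;

	remote_device->queued_ccb_in_progress = (union ccb *)ccb_h;

	/* Hypothetical helper that re-issues the CCB to the controller. */
	isci_io_request_dispatch((union ccb *)ccb_h);
}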
Example No. 9
void
isci_task_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    SCI_TASK_REQUEST_HANDLE_T task_request, SCI_TASK_STATUS completion_status)
{
	struct ISCI_TASK_REQUEST *isci_task_request =
		(struct ISCI_TASK_REQUEST *)sci_object_get_association(task_request);
	struct ISCI_CONTROLLER *isci_controller =
		(struct ISCI_CONTROLLER *)sci_object_get_association(scif_controller);
	struct ISCI_REMOTE_DEVICE *isci_remote_device =
		(struct ISCI_REMOTE_DEVICE *)sci_object_get_association(remote_device);
	struct ISCI_REMOTE_DEVICE *pending_remote_device;
	BOOL retry_task = FALSE;
	union ccb *ccb = isci_task_request->ccb;

	isci_remote_device->is_resetting = FALSE;

	switch ((int)completion_status) {
	case SCI_TASK_SUCCESS:
	case SCI_TASK_FAILURE_RESPONSE_VALID:
		break;

	case SCI_TASK_FAILURE_INVALID_STATE:
		retry_task = TRUE;
		isci_log_message(0, "ISCI",
		    "task failure (invalid state) - retrying\n");
		break;

	case SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES:
		retry_task = TRUE;
		isci_log_message(0, "ISCI",
		    "task failure (insufficient resources) - retrying\n");
		break;

	case SCI_FAILURE_TIMEOUT:
		if (isci_controller->fail_on_task_timeout) {
			retry_task = FALSE;
			isci_log_message(0, "ISCI",
			    "task timeout - not retrying\n");
			scif_cb_domain_device_removed(isci_controller,
			    isci_remote_device->domain, isci_remote_device);
		} else {
			retry_task = TRUE;
			isci_log_message(0, "ISCI",
			    "task timeout - retrying\n");
		}
		break;

	case SCI_TASK_FAILURE:
	case SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_TASK_FAILURE_INVALID_TAG:
	case SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR:
	case SCI_TASK_FAILURE_TERMINATED:
	case SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE:
		isci_log_message(0, "ISCI",
		    "unhandled task completion code 0x%x\n", completion_status);
		break;

	default:
		isci_log_message(0, "ISCI",
		    "unhandled task completion code 0x%x\n", completion_status);
		break;
	}

	if (isci_controller->is_frozen == TRUE) {
		isci_controller->is_frozen = FALSE;
		xpt_release_simq(isci_controller->sim, TRUE);
	}

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_task_request);

	/* Make sure we release the device queue, since it may have been frozen
	 *  if someone tried to start an I/O while the task was in progress.
	 */
	isci_remote_device_release_device_queue(isci_remote_device);

	if (retry_task == TRUE)
		isci_remote_device_reset(isci_remote_device, ccb);
	else {
		pending_remote_device = sci_fast_list_remove_head(
		    &isci_controller->pending_device_reset_list);

		if (pending_remote_device != NULL) {
			/* Any resets that were triggered from an XPT_RESET_DEV
			 *  CCB are never put in the pending list if the request
			 *  pool is empty - they are given back to CAM to be
			 *  requeued.  So we will always pass NULL here,
			 *  denoting that there is no CCB associated with the
			 *  device reset.
			 */
			isci_remote_device_reset(pending_remote_device, NULL);
		} else if (ccb != NULL) {
			/* There was a CCB associated with this reset, so mark
			 *  it complete and return it to CAM.
			 */
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;
			xpt_done(ccb);
		}
	}
}
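The pending_device_reset_list drained above needs a producer. A hedged sketch of what queuing a deferred reset could look like, assuming a sci_fast_list_insert_tail() operation matching the sci_fast_list_* calls in these examples; the helper name is hypothetical.

/* Hypothetical producer for pending_device_reset_list: used when no
 * request object is free to carry the reset immediately. */
static void
isci_remote_device_defer_reset(struct ISCI_CONTROLLER *controller,
    struct ISCI_REMOTE_DEVICE *remote_device)
{
	remote_device->is_resetting = TRUE;

	/* The completion path above pops this list and issues the reset. */
	sci_fast_list_insert_tail(&controller->pending_device_reset_list,
	    &remote_device->pending_device_reset_element);
}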
Example No. 10
/**
 * @brief This method retries an external SMP request.
 *
 * @param[in] old_io This parameter specifies the old SMP request to be
 *            retried.  The remote device for which the request is
 *            destined is derived from this request.
 *
 * @return Indicate if the retry was successfully started.
 */
SCI_STATUS scif_sas_smp_external_request_retry(
   SCIF_SAS_IO_REQUEST_T    * old_io
)
{
   SCIF_SAS_REMOTE_DEVICE_T * fw_device = old_io->parent.device;
   SCIF_SAS_CONTROLLER_T * fw_controller;
   SCIF_SAS_IO_REQUEST_T * new_io;
   void                  * new_request_memory = NULL;
   U8                      retry_count = old_io->retry_count;

   SCIF_LOG_TRACE((
      sci_base_object_get_logger(fw_device),
      SCIF_LOG_OBJECT_IO_REQUEST | SCIF_LOG_OBJECT_DOMAIN_DISCOVERY,
      "scif_sas_smp_external_request_retry(0x%x) time %d!\n",
      old_io
   ));

   fw_controller = fw_device->domain->controller;

   // Before we construct a new IO using the same memory, we need to
   // remove the old IO from the list of outstanding requests on the
   // domain so that we don't damage the domain's fast list of requests.
   sci_fast_list_remove_element(&old_io->parent.list_element);

   switch (fw_device->protocol_device.smp_device.current_smp_request)
   {
      case SMP_FUNCTION_DISCOVER:
         // We are retrying an external IO, so we reuse the old IO's
         // memory; new_request_memory is the same as old_io.
         new_request_memory = scif_sas_smp_request_construct_discover(
            fw_controller, fw_device,
            fw_device->protocol_device.smp_device.current_activity_phy_index,
            (void *)sci_object_get_association(old_io),
            (void *)old_io
         );

         break;

      case SMP_FUNCTION_PHY_CONTROL:
         //Phy Control command always uses external io memory.
         new_request_memory = scif_sas_smp_request_construct_phy_control(
            fw_controller, fw_device, PHY_OPERATION_HARD_RESET,
            fw_device->protocol_device.smp_device.current_activity_phy_index,
            (void *)sci_object_get_association(old_io),
            (void *)old_io
         );

         break;

      default:
         //unsupported case, TBD
         return SCI_FAILURE;
   } //end of switch

   // Set the retry count on the newly built SMP request.
   new_io = (SCIF_SAS_IO_REQUEST_T *) new_request_memory;
   new_io->retry_count = ++retry_count;

   //put into the high priority queue.
   sci_pool_put(fw_controller->hprq.pool, (POINTER_UINT) new_request_memory);

   //schedule the DPC to start new io.
   scif_cb_start_internal_io_task_schedule(
      fw_controller, scif_sas_controller_start_high_priority_io, fw_controller
   );

   return SCI_SUCCESS;
}
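Because each retry increments retry_count, a caller would typically cap it. A hedged call-site sketch with a hypothetical limit macro and wrapper name; SCI_FAILURE is the generic status used elsewhere in these examples.

// Hypothetical retry budget for external SMP requests.
#define SCIF_SAS_EXTERNAL_SMP_RETRY_LIMIT 3

SCI_STATUS scif_sas_smp_external_request_retry_checked(
   SCIF_SAS_IO_REQUEST_T * old_io
)
{
   if (old_io->retry_count >= SCIF_SAS_EXTERNAL_SMP_RETRY_LIMIT)
      return SCI_FAILURE;

   return scif_sas_smp_external_request_retry(old_io);
}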
Example No. 11
void
isci_io_request_complete(SCI_CONTROLLER_HANDLE_T scif_controller,
    SCI_REMOTE_DEVICE_HANDLE_T remote_device,
    struct ISCI_IO_REQUEST *isci_request, SCI_IO_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller;
	struct ISCI_REMOTE_DEVICE *isci_remote_device;
	union ccb *ccb;

	isci_controller = (struct ISCI_CONTROLLER *) sci_object_get_association(scif_controller);
	isci_remote_device =
		(struct ISCI_REMOTE_DEVICE *) sci_object_get_association(remote_device);

	ccb = isci_request->ccb;

	ccb->ccb_h.status &= ~CAM_STATUS_MASK;

	switch (completion_status) {
	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_COMPLETE_BEFORE_START:
#if __FreeBSD_version >= 900026
		if (ccb->ccb_h.func_code == XPT_SMP_IO) {
			void *smp_response =
			    scif_io_request_get_response_iu_address(
			        isci_request->sci_object);

			memcpy(ccb->smpio.smp_response, smp_response,
			    ccb->smpio.smp_response_len);
		}
#endif
		ccb->ccb_h.status |= CAM_REQ_CMP;
		break;

	case SCI_IO_SUCCESS_IO_DONE_EARLY:
		ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->csio.resid = ccb->csio.dxfer_len -
		    scif_io_request_get_number_of_bytes_transferred(
		        isci_request->sci_object);
		break;

	case SCI_IO_FAILURE_RESPONSE_VALID:
	{
		SCI_SSP_RESPONSE_IU_T * response_buffer;
		uint32_t sense_length;
		int error_code, sense_key, asc, ascq;
		struct ccb_scsiio *csio = &ccb->csio;

		response_buffer = (SCI_SSP_RESPONSE_IU_T *)
		    scif_io_request_get_response_iu_address(
		        isci_request->sci_object);

		sense_length = sci_ssp_get_sense_data_length(
		    response_buffer->sense_data_length);

		sense_length = MIN(csio->sense_len, sense_length);

		memcpy(&csio->sense_data, response_buffer->data, sense_length);

		csio->sense_resid = csio->sense_len - sense_length;
		csio->scsi_status = response_buffer->status;
		ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		scsi_extract_sense( &csio->sense_data, &error_code, &sense_key,
		    &asc, &ascq );
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x status=%x key=%x asc=%x ascq=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, csio->cdb_io.cdb_bytes[0],
		    csio->scsi_status, sense_key, asc, ascq);
		break;
	}

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		isci_remote_device_reset(isci_remote_device, NULL);

		/* drop through */
	case SCI_IO_FAILURE_TERMINATED:
		ccb->ccb_h.status |= CAM_REQ_TERMIO;
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x terminated\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0]);
		break;

	case SCI_IO_FAILURE_INVALID_STATE:
	case SCI_IO_FAILURE_INSUFFICIENT_RESOURCES:
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		isci_remote_device_freeze_lun_queue(isci_remote_device,
		    ccb->ccb_h.target_lun);
		break;

	case SCI_IO_FAILURE_INVALID_REMOTE_DEVICE:
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;

	case SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE:
		{
			struct ccb_relsim ccb_relsim;
			struct cam_path *path;

			xpt_create_path(&path, NULL,
			    cam_sim_path(isci_controller->sim),
			    isci_remote_device->index, 0);

			xpt_setup_ccb(&ccb_relsim.ccb_h, path, 5);
			ccb_relsim.ccb_h.func_code = XPT_REL_SIMQ;
			ccb_relsim.ccb_h.flags = CAM_DEV_QFREEZE;
			ccb_relsim.release_flags = RELSIM_ADJUST_OPENINGS;
			ccb_relsim.openings =
			    scif_remote_device_get_max_queue_depth(remote_device);
			xpt_action((union ccb *)&ccb_relsim);
			xpt_free_path(path);
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		}
		break;

	case SCI_IO_FAILURE:
	case SCI_IO_FAILURE_REQUIRES_SCSI_ABORT:
	case SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL:
	case SCI_IO_FAILURE_PROTOCOL_VIOLATION:
	case SCI_IO_FAILURE_INVALID_PARAMETER_VALUE:
	case SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR:
	default:
		isci_log_message(1, "ISCI",
		    "isci: bus=%x target=%x lun=%x cdb[0]=%x completion status=%x\n",
		    ccb->ccb_h.path_id, ccb->ccb_h.target_id,
		    ccb->ccb_h.target_lun, ccb->csio.cdb_io.cdb_bytes[0],
		    completion_status);
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		break;
	}

	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		/* ccb will be completed with some type of non-success
		 *  status.  So temporarily freeze the queue until the
		 *  upper layers can act on the status.  The CAM_DEV_QFRZN
		 *  flag will then release the queue after the status is
		 *  acted upon.
		 */
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
		xpt_freeze_devq(ccb->ccb_h.path, 1);
	}

	callout_stop(&isci_request->parent.timer);
	bus_dmamap_sync(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(isci_request->parent.dma_tag,
	    isci_request->parent.dma_map);

	if (isci_remote_device->frozen_lun_mask != 0 &&
	    !(ccb->ccb_h.status & CAM_REQUEUE_REQ))
		isci_remote_device_release_device_queue(isci_remote_device);

	xpt_done(ccb);
	isci_request->ccb = NULL;

	if (isci_controller->is_frozen == TRUE) {
		isci_controller->is_frozen = FALSE;
		xpt_release_simq(isci_controller->sim, TRUE);
	}

	sci_pool_put(isci_controller->request_pool,
	    (struct ISCI_REQUEST *)isci_request);
}