Example No. 1
0
/**
 * @brief This routine is called by the OS's DPC to start I/O requests from
 *        the internal high priority request queue.
 * @param[in] fw_controller The framework controller.
 *
 * @return none
 */
void scif_sas_controller_start_high_priority_io(
   SCIF_SAS_CONTROLLER_T * fw_controller
)
{
   POINTER_UINT            io_address;
   SCIF_SAS_IO_REQUEST_T * fw_io;
   SCI_STATUS              status;

   SCIF_LOG_TRACE((
      sci_base_object_get_logger(fw_controller),
      SCIF_LOG_OBJECT_CONTROLLER | SCIF_LOG_OBJECT_IO_REQUEST,
      "scif_controller_start_high_priority_io(0x%x) enter\n",
      fw_controller
   ));

   while ( !sci_pool_empty(fw_controller->hprq.pool) )
   {
      sci_pool_get(fw_controller->hprq.pool, io_address);

      fw_io = (SCIF_SAS_IO_REQUEST_T *)io_address;

      status = fw_controller->state_handlers->start_high_priority_io_handler(
         (SCI_BASE_CONTROLLER_T*) fw_controller,
         (SCI_BASE_REMOTE_DEVICE_T*) fw_io->parent.device,
         (SCI_BASE_REQUEST_T*) fw_io,
         SCI_CONTROLLER_INVALID_IO_TAG
      );
   }
}
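The drain loop above is built entirely on the SCIL pool macros (sci_pool_empty(), sci_pool_get()). As a rough mental model only, and not SCIL's actual implementation, the standalone sketch below shows a tiny fixed-size FIFO pool and the same empty-check-then-get drain pattern; every name in it (demo_pool, demo_pool_get, ...) is illustrative.

#include <stdio.h>

/* Illustrative fixed-size FIFO pool; the real sci_pool macros in SCIL are
 * similar in spirit but not identical to this sketch. */
#define POOL_SIZE 4

struct demo_pool {
    unsigned long elements[POOL_SIZE + 1];
    unsigned int  get_index;   /* next slot to read  */
    unsigned int  put_index;   /* next slot to write */
};

#define demo_pool_empty(p)  ((p).get_index == (p).put_index)
#define demo_pool_put(p, v) \
    do { (p).elements[(p).put_index] = (v); \
         (p).put_index = ((p).put_index + 1) % (POOL_SIZE + 1); } while (0)
#define demo_pool_get(p, v) \
    do { (v) = (p).elements[(p).get_index]; \
         (p).get_index = ((p).get_index + 1) % (POOL_SIZE + 1); } while (0)

int main(void)
{
    struct demo_pool pool = { .get_index = 0, .put_index = 0 };
    unsigned long io_address;

    demo_pool_put(pool, 0x1000UL);
    demo_pool_put(pool, 0x2000UL);

    /* Same shape as the high priority drain loop above. */
    while (!demo_pool_empty(pool)) {
        demo_pool_get(pool, io_address);
        printf("start io at 0x%lx\n", io_address);
    }
    return 0;
}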
Example No. 2
0
void
isci_io_request_execute_scsi_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	struct ccb_scsiio *csio = &ccb->csio;
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	struct ISCI_REMOTE_DEVICE *device =
	    controller->remote_device[target_id];
	int error;

	if (device == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->current_sge_index = 0;
	io_request->parent.remote_device_handle = device->sci_object;

	if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) != 0)
		panic("Unexpected CAM_SCATTER_VALID flag!  flags = 0x%x\n",
		    ccb->ccb_h.flags);

	if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
		panic("Unexpected CAM_DATA_PHYS flag!  flags = 0x%x\n",
		    ccb->ccb_h.flags);

	error = bus_dmamap_load(io_request->parent.dma_tag,
	    io_request->parent.dma_map, csio->data_ptr, csio->dxfer_len,
	    isci_io_request_construct, io_request, 0x0);

	/* A resource shortage from BUSDMA will be automatically
	 * continued at a later point, pushing the CCB processing
	 * forward, which will in turn unfreeze the simq.
	 */
	if (error == EINPROGRESS) {
		xpt_freeze_simq(controller->sim, 1);
		ccb->ccb_h.flags |= CAM_RELEASE_SIMQ;
	}
}
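bus_dmamap_load() hands the finished scatter/gather list to isci_io_request_construct(), either synchronously or later when it returned EINPROGRESS. The skeleton below only illustrates the shape of such a bus_dmamap_callback_t; example_io_request_construct is a hypothetical name, and the real isci_io_request_construct() (not shown in this excerpt) goes on to build the SCIF request and start the I/O.

/* Hypothetical skeleton only: the shape of a bus_dma(9) load callback.
 * The real isci_io_request_construct() builds and starts the SCIF I/O. */
static void
example_io_request_construct(void *arg, bus_dma_segment_t *seg,
    int nseg, int error)
{
	struct ISCI_IO_REQUEST *io_request = (struct ISCI_IO_REQUEST *)arg;

	if (error != 0) {
		/* Fail the CCB, mirroring the status handling used above. */
		io_request->ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		io_request->ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		xpt_done(io_request->ccb);
		return;
	}

	/* seg[0..nseg-1] now describes the mapped buffer; this is where the
	 * request would be constructed and handed to the controller. */
	(void)seg;
	(void)nseg;
}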
Example No. 3
0
/**
 * @brief This method will be invoked to allocate memory dynamically.
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which to allocate memory.
 * @param[out] mde This parameter represents the memory descriptor to
 *             be filled in by the user that will reference the newly
 *             allocated memory.
 *
 * @return none
 */
void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	/*
	 * Note this routine is only used for buffers needed to translate
	 * SCSI UNMAP commands to ATA DSM commands for SATA disks.
	 *
	 * We first try to pull a buffer from the controller's pool, and only
	 * call contigmalloc if one isn't there.
	 */
	if (!sci_pool_empty(isci_controller->unmap_buffer_pool)) {
		sci_pool_get(isci_controller->unmap_buffer_pool,
		    mde->virtual_address);
	} else
		mde->virtual_address = contigmalloc(PAGE_SIZE,
		    M_ISCI, M_NOWAIT, 0, BUS_SPACE_MAXADDR,
		    mde->constant_memory_alignment, 0);

	if (mde->virtual_address != NULL)
		bus_dmamap_load(isci_controller->buffer_dma_tag,
		    NULL, mde->virtual_address, PAGE_SIZE,
		    isci_single_map, &mde->physical_address,
		    BUS_DMA_NOWAIT);
}
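The allocator above has an implied release counterpart that recycles the page into unmap_buffer_pool instead of freeing it, so a later allocation can skip contigmalloc(). The sketch below shows a plausible shape for that counterpart, assuming sci_pool_put() is the inverse of the sci_pool_get() used here; the driver's exact scif_cb_controller_free_memory() body is not part of this excerpt.

/* Plausible counterpart to the allocator above (sketch): recycle the page
 * into the controller's pool so a later scif_cb_controller_allocate_memory()
 * call can reuse it without allocating. */
void scif_cb_controller_free_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	/* Returning the buffer to the pool avoids calling contigfree() in a
	 * context where extra allocator work is undesirable. */
	sci_pool_put(isci_controller->unmap_buffer_pool, mde->virtual_address);
}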
Example No. 4
0
/**
 * @brief This method will ensure all internal requests destined for
 *        devices contained in the supplied domain are properly removed
 *        from the high priority request queue.
 *
 * @param[in] fw_hprq This parameter specifies the high priority request
 *            queue object for which to attempt to remove elements.
 * @param[in] fw_domain This parameter specifies the domain for which to
 *            remove all high priority requests.
 *
 * @return none
 */
void scif_sas_high_priority_request_queue_purge_domain(
   SCIF_SAS_HIGH_PRIORITY_REQUEST_QUEUE_T * fw_hprq,
   SCIF_SAS_DOMAIN_T                      * fw_domain
)
{
   SCIF_SAS_IO_REQUEST_T * fw_io;
   POINTER_UINT            io_address;
   U32                     index;
   U32                     element_count;

   SCIF_LOG_TRACE((
      sci_base_object_get_logger(&fw_hprq->lock),
      SCIF_LOG_OBJECT_CONTROLLER | SCIF_LOG_OBJECT_DOMAIN_DISCOVERY,
      "scif_sas_high_priority_request_queue_purge_domain(0x%x,0x%x) enter\n",
      fw_hprq, fw_domain
   ));

   element_count = sci_pool_count(fw_hprq->pool);

   scif_cb_lock_acquire(fw_domain->controller, &fw_hprq->lock);

   for (index = 0; index < element_count; index++)
   {
      sci_pool_get(fw_hprq->pool, io_address);

      fw_io = (SCIF_SAS_IO_REQUEST_T*) io_address;

      // If the high priority request is not intended for this domain,
      // then it can be left in the pool.
      if (fw_io->parent.device->domain != fw_domain)
      {
         sci_pool_put(fw_hprq->pool, io_address);
      }
      else
      {
         if (fw_io->parent.is_internal)
         {
            SCIF_SAS_INTERNAL_IO_REQUEST_T * fw_internal_io =
               (SCIF_SAS_INTERNAL_IO_REQUEST_T *)fw_io;

            // The request was intended for a device in the domain.  Put it
            // back in the pool of freely available internal request memory
            // objects. The internal IO's timer is to be destroyed.
            scif_sas_internal_io_request_destruct(fw_domain->controller, fw_internal_io);
         }
      }
   }

   scif_cb_lock_release(fw_domain->controller, &fw_hprq->lock);
}
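The purge above uses a rotate-and-filter idiom: read the pool's element count once, pop exactly that many entries, and put back only the ones that should survive, so requests for other domains keep their relative order. The standalone sketch below applies the same idiom to a plain integer ring; all names in it are illustrative and it is not SCIL code.

#include <stdio.h>

#define RING_SLOTS 8

struct ring {
    int values[RING_SLOTS];
    unsigned int get, put, count;
};

static void ring_put(struct ring *r, int v)
{
    r->values[r->put] = v;
    r->put = (r->put + 1) % RING_SLOTS;
    r->count++;
}

static int ring_get(struct ring *r)
{
    int v = r->values[r->get];
    r->get = (r->get + 1) % RING_SLOTS;
    r->count--;
    return v;
}

int main(void)
{
    struct ring r = { .get = 0, .put = 0, .count = 0 };
    unsigned int i, element_count;

    for (i = 1; i <= 5; i++)
        ring_put(&r, (int)i);

    /* Visit each element exactly once: keep the odd values (put them back)
     * and drop the even ones, just as the purge keeps requests belonging to
     * other domains and destructs the ones that match. */
    element_count = r.count;
    for (i = 0; i < element_count; i++) {
        int v = ring_get(&r);
        if (v % 2 != 0)
            ring_put(&r, v);
        else
            printf("purged %d\n", v);
    }

    while (r.count > 0)
        printf("kept %d\n", ring_get(&r));
    return 0;
}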
Example No. 5
0
/**
 * @brief This callback method asks the user to create a timer and provide
 *        a handle for this timer for use in further timer interactions.
 *
 * @warning The "timer_callback" method should be executed in a mutually
 *          exclusive manner from the controller completion handler
 *          (refer to scic_controller_get_handler_methods()).
 *
 * @param[in]  timer_callback This parameter specifies the callback method
 *             to be invoked whenever the timer expires.
 * @param[in]  scif_controller This parameter specifies the controller with
 *             which this timer is to be associated.
 * @param[in]  cookie This parameter specifies a piece of information that
 *             the user must retain.  This cookie is to be supplied by the
 *             user anytime a timeout occurs for the created timer.
 *
 * @return This method returns a handle to a timer object created by the
 *         user.  The handle will be utilized for all further interactions
 *         relating to this timer.
 */
void *
scif_cb_timer_create(SCI_CONTROLLER_HANDLE_T scif_controller,
                     SCI_TIMER_CALLBACK_T timer_callback, void *cookie)
{
    struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
            sci_object_get_association(scif_controller);
    struct ISCI_TIMER *timer;

    sci_pool_get(isci_controller->timer_pool, timer);

    callout_init_mtx(&timer->callout, &isci_controller->lock, FALSE);

    timer->callback = timer_callback;
    timer->cookie = cookie;
    timer->is_started = FALSE;

    isci_log_message(3, "TIMER", "create %p %p %p\n", timer, timer_callback, cookie);

    return (timer);
}
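The timer handle returned here is later armed and disarmed through further scif_cb_timer_* callbacks. The sketch below shows what the start side might look like; the dispatch wrapper name is hypothetical, and the assumption that the timeout arrives in milliseconds and that the stored SCI_TIMER_CALLBACK_T takes only the cookie are both assumptions, not facts taken from this excerpt.

/* Hypothetical dispatch wrapper: runs when the callout fires and forwards
 * the stored cookie to the SCIL-supplied callback. */
static void
example_timer_dispatch(void *arg)
{
    struct ISCI_TIMER *timer = (struct ISCI_TIMER *)arg;

    timer->is_started = FALSE;
    timer->callback(timer->cookie);
}

/* Sketch of a start callback, assuming 'milliseconds' is the requested
 * timeout; callout_reset() expects ticks, hence the hz conversion. */
void
scif_cb_timer_start(SCI_CONTROLLER_HANDLE_T scif_controller,
    void *timer_handle, U32 milliseconds)
{
    struct ISCI_TIMER *timer = (struct ISCI_TIMER *)timer_handle;

    timer->is_started = TRUE;
    callout_reset(&timer->callout, (milliseconds * hz) / 1000,
        example_timer_dispatch, timer);
}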
Example No. 6
0
/**
 * @brief This routine allocates the memory for creating a new internal
 *        request.
 *
 * @param[in] fw_controller The framework controller from whose internal
 *            request memory pool the allocation is made.
 *
 * @return void* The address of the internal request memory, or NULL if
 *         the pool is empty.
 */
void * scif_sas_controller_allocate_internal_request(
   SCIF_SAS_CONTROLLER_T * fw_controller
)
{
   POINTER_UINT internal_io_address;

   if( !sci_pool_empty(fw_controller->internal_request_memory_pool) )
   {
      sci_pool_get(
         fw_controller->internal_request_memory_pool, internal_io_address
      );

      // Zero the internal request memory before it is reused.
      memset((char*)internal_io_address, 0, scif_sas_internal_request_get_object_size());

      return (void *) internal_io_address;
   }
   else
      return NULL;
}
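Once an internal request completes, its memory would be handed back to internal_request_memory_pool so this allocator can reuse it. The routine name below is illustrative, and the sketch assumes sci_pool_put() is the inverse of the sci_pool_get() used above; the framework's real free path (which also destroys the request's timer) is not shown in this excerpt.

// Sketch only (assumed helper name): return a completed internal request's
// memory so a later scif_sas_controller_allocate_internal_request() call
// can reuse it.
void scif_sas_controller_free_internal_request_memory(
   SCIF_SAS_CONTROLLER_T * fw_controller,
   void                  * internal_request_memory
)
{
   sci_pool_put(
      fw_controller->internal_request_memory_pool,
      (POINTER_UINT) internal_request_memory
   );
}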
Example No. 7
0
static int
isci_detach(device_t device)
{
    struct isci_softc *isci = DEVICE2SOFTC(device);
    int i, phy;

    for (i = 0; i < isci->controller_count; i++) {
        struct ISCI_CONTROLLER *controller = &isci->controllers[i];
        SCI_STATUS status;
        void *unmap_buffer;

        if (controller->scif_controller_handle != NULL) {
            scic_controller_disable_interrupts(
                scif_controller_get_scic_handle(controller->scif_controller_handle));

            mtx_lock(&controller->lock);
            status = scif_controller_stop(controller->scif_controller_handle, 0);
            mtx_unlock(&controller->lock);

            while (controller->is_started == TRUE) {
                /* Now poll for interrupts until the controller stop complete
                 *  callback is received.
                 */
                mtx_lock(&controller->lock);
                isci_interrupt_poll_handler(controller);
                mtx_unlock(&controller->lock);
                pause("isci", 1);
            }

            if(controller->sim != NULL) {
                mtx_lock(&controller->lock);
                xpt_free_path(controller->path);
                xpt_bus_deregister(cam_sim_path(controller->sim));
                cam_sim_free(controller->sim, TRUE);
                mtx_unlock(&controller->lock);
            }
        }

        if (controller->timer_memory != NULL)
            free(controller->timer_memory, M_ISCI);

        if (controller->remote_device_memory != NULL)
            free(controller->remote_device_memory, M_ISCI);

        for (phy = 0; phy < SCI_MAX_PHYS; phy++) {
            if (controller->phys[phy].cdev_fault)
                led_destroy(controller->phys[phy].cdev_fault);

            if (controller->phys[phy].cdev_locate)
                led_destroy(controller->phys[phy].cdev_locate);
        }

        while (1) {
            sci_pool_get(controller->unmap_buffer_pool, unmap_buffer);
            if (unmap_buffer == NULL)
                break;
            contigfree(unmap_buffer, PAGE_SIZE, M_ISCI);
        }
    }

    /* The SCIF controllers have been stopped, so we can now
     *  free the SCI library memory.
     */
    if (isci->sci_library_memory != NULL)
        free(isci->sci_library_memory, M_ISCI);

    for (i = 0; i < ISCI_NUM_PCI_BARS; i++)
    {
        struct ISCI_PCI_BAR *pci_bar = &isci->pci_bar[i];

        if (pci_bar->resource != NULL)
            bus_release_resource(device, SYS_RES_MEMORY,
                                 pci_bar->resource_id, pci_bar->resource);
    }

    for (i = 0; i < isci->num_interrupts; i++)
    {
        struct ISCI_INTERRUPT_INFO *interrupt_info;

        interrupt_info = &isci->interrupt_info[i];

        if(interrupt_info->tag != NULL)
            bus_teardown_intr(device, interrupt_info->res,
                              interrupt_info->tag);

        if(interrupt_info->res != NULL)
            bus_release_resource(device, SYS_RES_IRQ,
                                 rman_get_rid(interrupt_info->res),
                                 interrupt_info->res);

        pci_release_msi(device);
    }
    pci_disable_busmaster(device);

    return (0);
}
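The unmap-buffer cleanup above pops entries until sci_pool_get() yields NULL. A drop-in variant that makes the termination condition explicit, using only the sci_pool_empty() guard already seen in these examples, could look like the fragment below (a sketch; it behaves identically only if the pool never stores NULL as a live entry).

        /* Variant sketch: drain guarded by sci_pool_empty() instead of
         * relying on sci_pool_get() yielding NULL when the pool runs dry. */
        while (!sci_pool_empty(controller->unmap_buffer_pool)) {
            sci_pool_get(controller->unmap_buffer_pool, unmap_buffer);
            if (unmap_buffer != NULL)
                contigfree(unmap_buffer, PAGE_SIZE, M_ISCI);
        }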
Example No. 8
0
void
isci_remote_device_reset(struct ISCI_REMOTE_DEVICE *remote_device,
    union ccb *ccb)
{
	struct ISCI_CONTROLLER *controller = remote_device->domain->controller;
	struct ISCI_REQUEST *request;
	struct ISCI_TASK_REQUEST *task_request;
	SCI_STATUS status;

	if (remote_device->is_resetting == TRUE) {
		/* device is already being reset, so return immediately */
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		/* No requests are available in our request pool.  If this reset is tied
		 *  to a CCB, ask CAM to requeue it.  Otherwise, we need to put it on our
		 *  pending device reset list, so that the reset will occur when a request
		 *  frees up.
		 */
		if (ccb == NULL)
			sci_fast_list_insert_tail(
			    &controller->pending_device_reset_list,
			    &remote_device->pending_device_reset_element);
		else {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			xpt_done(ccb);
		}
		return;
	}

	isci_log_message(0, "ISCI",
	    "Sending reset to device on controller %d domain %d CAM index %d\n",
	    controller->index, remote_device->domain->index,
	    remote_device->index
	);

	sci_pool_get(controller->request_pool, request);
	task_request = (struct ISCI_TASK_REQUEST *)request;

	task_request->parent.remote_device_handle = remote_device->sci_object;
	task_request->ccb = ccb;

	remote_device->is_resetting = TRUE;

	status = (SCI_STATUS) scif_task_request_construct(
	    controller->scif_controller_handle, remote_device->sci_object,
	    SCI_CONTROLLER_INVALID_IO_TAG, (void *)task_request,
	    (void *)((char*)task_request + sizeof(struct ISCI_TASK_REQUEST)),
	    &task_request->sci_object);

	if (status != SCI_SUCCESS) {
		isci_task_request_complete(controller->scif_controller_handle,
		    remote_device->sci_object, task_request->sci_object,
		    (SCI_TASK_STATUS)status);
		return;
	}

	status = (SCI_STATUS)scif_controller_start_task(
	    controller->scif_controller_handle, remote_device->sci_object,
	    task_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_task_request_complete(
		    controller->scif_controller_handle,
		    remote_device->sci_object, task_request->sci_object,
		    (SCI_TASK_STATUS)status);
		return;
	}
}
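When the request pool is empty, the reset is parked on pending_device_reset_list and must be retried once a request frees up. The sketch below shows one way a completion path could do that; the sci_fast_list_is_empty() and sci_fast_list_remove_head() accessors are assumed (they do not appear in this excerpt), and the function name is hypothetical.

/* Illustrative retry hook (assumed helper names; see note above): called
 * from a request-completion path after the finished request has been
 * returned to controller->request_pool. */
static void
example_retry_pending_device_resets(struct ISCI_CONTROLLER *controller)
{
	while (!sci_fast_list_is_empty(&controller->pending_device_reset_list) &&
	    !sci_pool_empty(controller->request_pool)) {
		struct ISCI_REMOTE_DEVICE *device =
		    (struct ISCI_REMOTE_DEVICE *)sci_fast_list_remove_head(
			&controller->pending_device_reset_list);

		/* Re-issue the reset now that a request object is available;
		 * NULL means no CCB is associated with this reset. */
		isci_remote_device_reset(device, NULL);
	}
}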
Example No. 9
0
void
isci_io_request_execute_smp_io(union ccb *ccb,
    struct ISCI_CONTROLLER *controller)
{
	SCI_STATUS status;
	target_id_t target_id = ccb->ccb_h.target_id;
	struct ISCI_REQUEST *request;
	struct ISCI_IO_REQUEST *io_request;
	SCI_REMOTE_DEVICE_HANDLE_T smp_device_handle;
	struct ISCI_REMOTE_DEVICE *end_device = controller->remote_device[target_id];

	/* SMP commands are sent to an end device, because SMP devices are not
	 *  exposed to the kernel.  It is our responsibility to use this method
	 *  to get the SMP device that contains the specified end device.  If
	 *  the device is direct-attached, the handle will come back NULL, and
	 *  we'll just fail the SMP_IO with DEV_NOT_THERE.
	 */
	scif_remote_device_get_containing_device(end_device->sci_object,
	    &smp_device_handle);

	if (smp_device_handle == NULL) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
		xpt_done(ccb);
		return;
	}

	if (sci_pool_empty(controller->request_pool)) {
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		xpt_freeze_simq(controller->sim, 1);
		controller->is_frozen = TRUE;
		xpt_done(ccb);
		return;
	}

	ASSERT(end_device->is_resetting == FALSE);

	sci_pool_get(controller->request_pool, request);
	io_request = (struct ISCI_IO_REQUEST *)request;

	io_request->ccb = ccb;
	io_request->parent.remote_device_handle = smp_device_handle;

	status = isci_smp_request_construct(io_request);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	sci_object_set_association(io_request->sci_object, io_request);

	status = (SCI_STATUS) scif_controller_start_io(
	    controller->scif_controller_handle, smp_device_handle,
	    io_request->sci_object, SCI_CONTROLLER_INVALID_IO_TAG);

	if (status != SCI_SUCCESS) {
		isci_io_request_complete(controller->scif_controller_handle,
		    smp_device_handle, io_request, (SCI_IO_STATUS)status);
		return;
	}

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY)
		callout_reset(&io_request->parent.timer, ccb->ccb_h.timeout,
		    isci_io_request_timeout, request);
}