/**
 * @brief This routine constructs an SMP phy object for an expander phy and
 *        inserts it into the owning expander device's smp_phy_list.
 * @param[in] this_smp_phy The memory space in which to store the phy.
 * @param[in] owning_device The SMP remote device that owns this SMP phy.
 * @param[in] expander_phy_id The expander phy id for this_smp_phy.
 * @return None
 */
void scif_sas_smp_phy_construct(
   SCIF_SAS_SMP_PHY_T       * this_smp_phy,
   SCIF_SAS_REMOTE_DEVICE_T * owning_device,
   U8                         expander_phy_id
)
{
   memset(this_smp_phy, 0, sizeof(SCIF_SAS_SMP_PHY_T));

   this_smp_phy->phy_identifier = expander_phy_id;
   this_smp_phy->owning_device = owning_device;

   sci_fast_list_element_init((this_smp_phy), (&this_smp_phy->list_element));

   // Insert this phy into the owning device's smp phy list.
   sci_fast_list_insert_tail(
      (&owning_device->protocol_device.smp_device.smp_phy_list),
      (&this_smp_phy->list_element)
   );
}
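/*
 * Usage sketch (illustrative only, not part of the driver): constructing
 * the SMP phy objects for an expander once its phy count is known.  The
 * function name, the smp_phys backing array, and phy_count are hypothetical;
 * in SCIL the phy objects actually come from the controller's
 * smp_phy_memory_list.
 */
void example_construct_expander_phys(
   SCIF_SAS_REMOTE_DEVICE_T * expander_device,
   SCIF_SAS_SMP_PHY_T       * smp_phys,   // hypothetical backing storage
   U8                         phy_count   // number of expander phys
)
{
   U8 phy_id;

   // Each call zeroes one smp phy object, records its phy id and owning
   // device, and appends it to the device's smp_phy_list.
   for (phy_id = 0; phy_id < phy_count; phy_id++)
      scif_sas_smp_phy_construct(&smp_phys[phy_id], expander_device, phy_id);
}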
int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
{
	int error;
	device_t device = controller->isci->device;
	uint32_t max_segment_size = isci_io_request_get_max_io_size();
	uint32_t status = 0;
	struct ISCI_MEMORY *uncached_controller_memory =
	    &controller->uncached_controller_memory;
	struct ISCI_MEMORY *cached_controller_memory =
	    &controller->cached_controller_memory;
	struct ISCI_MEMORY *request_memory =
	    &controller->request_memory;
	POINTER_UINT virtual_address;
	bus_addr_t physical_address;

	controller->mdl = sci_controller_get_memory_descriptor_list_handle(
	    controller->scif_controller_handle);

	uncached_controller_memory->size = sci_mdl_decorator_get_memory_size(
	    controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS);

	error = isci_allocate_dma_buffer(device, uncached_controller_memory);

	if (error != 0)
		return (error);

	sci_mdl_decorator_assign_memory( controller->mdl,
	    SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
	    uncached_controller_memory->virtual_address,
	    uncached_controller_memory->physical_address);

	cached_controller_memory->size = sci_mdl_decorator_get_memory_size(
	    controller->mdl,
	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
	);

	error = isci_allocate_dma_buffer(device, cached_controller_memory);

	if (error != 0)
		return (error);

	sci_mdl_decorator_assign_memory(controller->mdl,
	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
	    cached_controller_memory->virtual_address,
	    cached_controller_memory->physical_address);

	request_memory->size =
	    controller->queue_depth * isci_io_request_get_object_size();

	error = isci_allocate_dma_buffer(device, request_memory);

	if (error != 0)
		return (error);

	/* For STP PIO testing, we want to ensure we can force multiple SGLs
	 *  since this has been a problem area in SCIL.  This tunable parameter
	 *  will allow us to force DMA segments to a smaller size, ensuring
	 *  that even if a physically contiguous buffer is attached to this
	 *  I/O, the DMA subsystem will pass us multiple segments in our DMA
	 *  load callback.
	 */
	TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size);

	/* Create the DMA tag for our I/O requests.  We can then create DMA maps
	 *  based on this tag and store them in each of our ISCI_IO_REQUEST
	 *  objects.  This yields better performance than creating the DMA maps
	 *  every time we get an I/O.
	 */
	status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    isci_io_request_get_max_io_size(),
	    SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL,
	    &controller->buffer_dma_tag);

	if (status != 0)
		return (status);

	sci_pool_initialize(controller->request_pool);

	virtual_address = request_memory->virtual_address;
	physical_address = request_memory->physical_address;

	for (int i = 0; i < controller->queue_depth; i++) {
		struct ISCI_REQUEST *request =
		    (struct ISCI_REQUEST *)virtual_address;

		isci_request_construct(request,
		    controller->scif_controller_handle,
		    controller->buffer_dma_tag, physical_address);

		sci_pool_put(controller->request_pool, request);

		virtual_address += isci_request_get_object_size();
		physical_address += isci_request_get_object_size();
	}

	uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) +
	    scif_remote_device_get_object_size();

	controller->remote_device_memory = (uint8_t *) malloc(
	    remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI,
	    M_NOWAIT | M_ZERO);

	/* M_NOWAIT allocations may fail, so check before carving the buffer
	 *  into remote device objects below.
	 */
	if (controller->remote_device_memory == NULL)
		return (ENOMEM);

	sci_pool_initialize(controller->remote_device_pool);

	uint8_t *remote_device_memory_ptr = controller->remote_device_memory;

	for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct ISCI_REMOTE_DEVICE *remote_device =
		    (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr;

		controller->remote_device[i] = NULL;
		remote_device->index = i;
		remote_device->is_resetting = FALSE;
		remote_device->frozen_lun_mask = 0;
		sci_fast_list_element_init(remote_device,
		    &remote_device->pending_device_reset_element);
		sci_pool_put(controller->remote_device_pool, remote_device);
		remote_device_memory_ptr += remote_device_size;
	}

	return (0);
}
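/*
 * Usage sketch (illustrative only, not part of the driver): once the pools
 * above are populated, an incoming I/O can borrow a pre-constructed request
 * object instead of allocating and DMA-mapping one per request.  The helper
 * name is hypothetical; sci_pool_empty()/sci_pool_get() are the SCIL pool
 * accessors assumed here.
 */
struct ISCI_REQUEST *
example_get_request(struct ISCI_CONTROLLER *controller)
{
	struct ISCI_REQUEST *request = NULL;

	/* Fail gracefully when all queue_depth requests are in flight. */
	if (!sci_pool_empty(controller->request_pool))
		sci_pool_get(controller->request_pool, request);

	return (request);
}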
/**
 * @brief This method implements the actions taken when entering the
 *        RESET state.
 *
 * @param[in]  object This parameter specifies the base object for which
 *             the state transition is occurring.  This is cast into a
 *             SCIF_SAS_CONTROLLER object in the method implementation.
 *
 * @return none
 */
static
void scif_sas_controller_reset_state_enter(
   SCI_BASE_OBJECT_T * object
)
{
   SCIF_SAS_CONTROLLER_T * fw_controller = (SCIF_SAS_CONTROLLER_T *)object;
   U8 index;
   U16 smp_phy_index;

   SET_STATE_HANDLER(
      fw_controller,
      scif_sas_controller_state_handler_table,
      SCI_BASE_CONTROLLER_STATE_RESET
   );

   scif_sas_high_priority_request_queue_construct(
      &fw_controller->hprq, sci_base_object_get_logger(fw_controller)
   );

   // Construct the abstract element pool.  This pool will store the
   // references to the framework's remote device objects.
   sci_abstract_element_pool_construct(
      &fw_controller->free_remote_device_pool,
      fw_controller->remote_device_pool_elements,
      SCI_MAX_REMOTE_DEVICES
   );

   // Construct the domain objects.
   for (index = 0; index < SCI_MAX_DOMAINS; index++)
   {
      scif_sas_domain_construct(
         &fw_controller->domains[index], index, fw_controller
      );
   }

   // Initialize the SMP phy memory list, which acts as the free pool of
   // smp phy objects for the entire controller.
   sci_fast_list_init(&fw_controller->smp_phy_memory_list);

   for (smp_phy_index = 0;
        smp_phy_index < SCIF_SAS_SMP_PHY_COUNT;
        smp_phy_index++)
   {
      sci_fast_list_element_init(
         &fw_controller->smp_phy_array[smp_phy_index],
         &(fw_controller->smp_phy_array[smp_phy_index].list_element)
      );

      // Insert this phy into the controller's free smp phy list.
      sci_fast_list_insert_tail(
         (&(fw_controller->smp_phy_memory_list)),
         (&(fw_controller->smp_phy_array[smp_phy_index].list_element))
      );
   }

   scif_sas_controller_set_default_config_parameters(fw_controller);

   fw_controller->internal_request_entries =
      SCIF_SAS_MAX_INTERNAL_REQUEST_COUNT;

   //@todo May need to verify that all timers have been released, including
   //      each domain's operation timer and the timers of all internal IOs.

   // Ask the OS-specific layer to disassociate the high priority request
   // queue lock.
   scif_cb_lock_disassociate(fw_controller, &fw_controller->hprq.lock);
}
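/*
 * Usage sketch (illustrative only, not part of the driver): during expander
 * discovery the framework draws a free smp phy from the controller-wide
 * smp_phy_memory_list populated above and hands it to
 * scif_sas_smp_phy_construct(), which re-links it onto the owning expander's
 * smp_phy_list.  The helper name below is hypothetical, and
 * sci_fast_list_is_empty()/sci_fast_list_remove_head() are the fast-list
 * accessors assumed here.
 */
static
SCIF_SAS_SMP_PHY_T * example_allocate_smp_phy(
   SCIF_SAS_CONTROLLER_T * fw_controller
)
{
   // Pop an unused smp phy object from the controller's free list, or
   // return NULL when all SCIF_SAS_SMP_PHY_COUNT entries are in use.
   if (!sci_fast_list_is_empty(&fw_controller->smp_phy_memory_list))
      return (SCIF_SAS_SMP_PHY_T *)
             sci_fast_list_remove_head(&fw_controller->smp_phy_memory_list);

   return NULL;
}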