Example #1
0
/*
 * mptsas_raidvol_page_0_cb()
 *
 * Config-page iterator callback for MPI2 RAID Volume Page 0.  Decodes
 * the volume state, settings, status flags and type out of the
 * DMA-mapped page buffer (through the DDI access handle) into the
 * mptsas_raidvol_t passed via the va_list, logs notable volume states,
 * then records each member disk and fetches its settings.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the IOC reported an error or
 * the page does not exist.
 */
static int
mptsas_raidvol_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidVolPage0_t raidpage;
	int rval = DDI_SUCCESS, i;
	mptsas_raidvol_t *raidvol;
	uint8_t	numdisks, volstate, voltype, physdisknum;
	uint32_t volsetting;
	uint32_t statusflags, resync_flag;

	/* A non-existent page just means there are no (more) volumes. */
	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return (DDI_FAILURE);

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidvol_page0_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}

	/* The caller supplies the raid volume structure to fill in. */
	raidvol = va_arg(ap,  mptsas_raidvol_t *);

	raidpage = (pMpi2RaidVolPage0_t)page_memp;
	volstate = ddi_get8(accessp, &raidpage->VolumeState);
	volsetting = ddi_get32(accessp,
	    (uint32_t *)(void *)&raidpage->VolumeSettings);
	statusflags = ddi_get32(accessp, &raidpage->VolumeStatusFlags);
	voltype = ddi_get8(accessp, &raidpage->VolumeType);

	raidvol->m_state = volstate;
	raidvol->m_statusflags = statusflags;
	/*
	 * Volume size is not used right now. Set to 0.
	 */
	raidvol->m_raidsize = 0;
	raidvol->m_settings = volsetting;
	raidvol->m_raidlevel = voltype;

	if (statusflags & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED) {
		mptsas_log(mpt, CE_NOTE, "?Volume %d is quiesced\n",
		    raidvol->m_raidhandle);
	}

	if (statusflags &
	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
		mptsas_log(mpt, CE_NOTE, "?Volume %d is resyncing\n",
		    raidvol->m_raidhandle);
	}

	resync_flag = MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	switch (volstate) {
	case MPI2_RAID_VOL_STATE_OPTIMAL:
		mptsas_log(mpt, CE_NOTE, "?Volume %d is "
		    "optimal\n", raidvol->m_raidhandle);
		break;
	case MPI2_RAID_VOL_STATE_DEGRADED:
		/* Only warn if a resync is not already in progress. */
		if ((statusflags & resync_flag) == 0) {
			mptsas_log(mpt, CE_WARN, "Volume %d "
			    "is degraded\n",
			    raidvol->m_raidhandle);
		}
		break;
	case MPI2_RAID_VOL_STATE_FAILED:
		mptsas_log(mpt, CE_WARN, "Volume %d is "
		    "failed\n", raidvol->m_raidhandle);
		break;
	case MPI2_RAID_VOL_STATE_MISSING:
		mptsas_log(mpt, CE_WARN, "Volume %d is "
		    "missing\n", raidvol->m_raidhandle);
		break;
	default:
		break;
	}

	/*
	 * Read the member disk list.  Like every other field of this
	 * DMA-mapped page, these must go through the access handle;
	 * the previous code dereferenced the raw struct members, which
	 * bypasses the byte ordering/access semantics the handle
	 * provides.
	 */
	numdisks = ddi_get8(accessp, &raidpage->NumPhysDisks);
	raidvol->m_ndisks = numdisks;
	for (i = 0; i < numdisks; i++) {
		physdisknum = ddi_get8(accessp,
		    &raidpage->PhysDisk[i].PhysDiskNum);
		raidvol->m_disknum[i] = physdisknum;
		if (mptsas_get_physdisk_settings(mpt, raidvol,
		    physdisknum))
			break;
	}
	return (rval);
}
Example #2
0
/*
 * mptsas_raidconf_page_0_cb()
 *
 * Config-page iterator callback for MPI2 RAID Configuration Page 0.
 * Decodes one RAID configuration (its volumes plus the physical disks
 * backing them) into the next slot of mpt->m_active->m_raidconfig, and
 * hands this page's ConfigNum back through the va_list so the caller
 * can address the next configuration page.
 *
 * va_list arguments, in order:
 *	uint32_t *confignum	- out: ConfigNum of this page
 *	uint32_t configindex	- index into slots->m_raidconfig to fill
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the IOC reported an error or
 * the page does not exist.
 */
static int
mptsas_raidconf_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	/*
	 * NOTE(review): ap IS consumed below via va_arg(); this lint
	 * annotation looks stale — confirm before removing.
	 */
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidConfigurationPage0_t	raidconfig_page0;
	pMpi2RaidConfig0ConfigElement_t	element;
	uint32_t *confignum;
	int rval = DDI_SUCCESS, i;
	uint8_t numelements, vol, disk;
	uint16_t elementtype, voldevhandle;
	uint16_t etype_vol, etype_pd, etype_hs;
	uint16_t etype_oce;
	mptsas_slots_t *slots = mpt->m_active;
	m_raidconfig_t *raidconfig;
	uint64_t raidwwn;
	uint32_t native;
	mptsas_target_t	*ptgt;
	uint32_t configindex;

	/* A non-existent page means there are no (more) configurations. */
	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) {
		return (DDI_FAILURE);
	}

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_get_raid_conf_page0 "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	confignum = va_arg(ap,  uint32_t *);
	configindex = va_arg(ap, uint32_t);
	raidconfig_page0 = (pMpi2RaidConfigurationPage0_t)page_memp;
	/*
	 * Get all RAID configurations.
	 */
	etype_vol = MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT;
	etype_pd = MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT;
	etype_hs = MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT;
	etype_oce = MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT;
	/*
	 * Set up page address for next time through.
	 */
	*confignum =  ddi_get8(accessp,
	    &raidconfig_page0->ConfigNum);

	/*
	 * Point to the right config in the structure.
	 * Increment the number of valid RAID configs.
	 */
	raidconfig = &slots->m_raidconfig[configindex];
	slots->m_num_raid_configs++;

	/*
	 * Set the native flag if this is not a foreign
	 * configuration.
	 */
	native = ddi_get32(accessp, &raidconfig_page0->Flags);
	if (native & MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG) {
		native = FALSE;
	} else {
		native = TRUE;
	}
	raidconfig->m_native = (uint8_t)native;

	/*
	 * Get volume information for the volumes in the
	 * config.
	 */
	numelements = ddi_get8(accessp, &raidconfig_page0->NumElements);
	vol = 0;
	disk = 0;
	element = (pMpi2RaidConfig0ConfigElement_t)
	    &raidconfig_page0->ConfigElement;

	/*
	 * Walk the element list.  Foreign configurations (native ==
	 * FALSE) are skipped entirely by the loop condition.
	 */
	for (i = 0; ((i < numelements) && native); i++, element++) {
		/*
		 * Get the element type.  Could be Volume,
		 * PhysDisk, Hot Spare, or Online Capacity
		 * Expansion PhysDisk.
		 */
		elementtype = ddi_get16(accessp, &element->ElementFlags);
		elementtype &= MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;

		/*
		 * For volumes, get the RAID settings and the
		 * WWID.
		 */
		if (elementtype == etype_vol) {
			voldevhandle = ddi_get16(accessp,
			    &element->VolDevHandle);
			raidconfig->m_raidvol[vol].m_israid = 1;
			raidconfig->m_raidvol[vol].
			    m_raidhandle = voldevhandle;
			/*
			 * Get the settings for the raid
			 * volume.  This includes the
			 * DevHandles for the disks making up
			 * the raid volume.
			 */
			if (mptsas_get_raid_settings(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			/*
			 * Get the WWID of the RAID volume for
			 * SAS HBA
			 */
			if (mptsas_get_raid_wwid(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			raidwwn = raidconfig->m_raidvol[vol].
			    m_raidwwid;

			/*
			 * RAID uses phymask of 0.
			 */
			ptgt = mptsas_tgt_alloc(&slots->m_tgttbl,
			    voldevhandle, raidwwn, 0, 0, 0, mpt);

			raidconfig->m_raidvol[vol].m_raidtgt =
			    ptgt;

			/*
			 * Increment volume index within this
			 * raid config.
			 */
			vol++;
		} else if ((elementtype == etype_pd) ||
		    (elementtype == etype_hs) ||
		    (elementtype == etype_oce)) {
			/*
			 * For all other element types, put
			 * their DevHandles in the phys disk
			 * list of the config.  These are all
			 * some variation of a Phys Disk and
			 * this list is used to keep these
			 * disks from going online.
			 */
			raidconfig->m_physdisk_devhdl[disk] = ddi_get16(accessp,
			    &element->PhysDiskDevHandle);

			/*
			 * Increment disk index within this
			 * raid config.
			 */
			disk++;
		}
	}

	return (rval);
}
Example #3
0
/*
 * audio1575_close()
 *
 * Description:
 *	Closes an audio DMA engine that was previously opened.  No
 *	per-close teardown is performed here — the body is intentionally
 *	a no-op.  (NOTE(review): the original comment said this might
 *	power down the entire device, but no power management code is
 *	present in this body — confirm whether that is handled elsewhere.)
 *
 * Arguments:
 *	void	*arg		The DMA engine to shut down (unused)
 */
static void
audio1575_close(void *arg)
{
	_NOTE(ARGUNUSED(arg));
}
Example #4
0
/*
 * ISR/periodic callbacks.
 */

/*
 * efe_intr()
 *
 * Interrupt service routine.  Returns DDI_INTR_UNCLAIMED if the device
 * is suspended or reports no active interrupt status, otherwise
 * services receive, transmit and fatal-error conditions and returns
 * DDI_INTR_CLAIMED.  MAC-layer upcalls (mac_rx, mac_tx_update,
 * mii_reset) are made only after all driver locks have been dropped.
 */
uint_t
efe_intr(caddr_t arg1, caddr_t arg2)
{
	efe_t *efep = (void *)arg1;	/* arg1 is the per-instance state */
	uint32_t status;
	mblk_t *mp = NULL;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&efep->efe_intrlock);

	/* Ignore interrupts that arrive while suspended. */
	if (efep->efe_flags & FLAG_SUSPENDED) {
		mutex_exit(&efep->efe_intrlock);
		return (DDI_INTR_UNCLAIMED);
	}

	status = GETCSR(efep, CSR_INTSTAT);
	if (!(status & INTSTAT_ACTV)) {
		mutex_exit(&efep->efe_intrlock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* Write the status bits back — presumably write-1-to-clear ack. */
	PUTCSR(efep, CSR_INTSTAT, status);

	/* Receive complete: pull the chain now, deliver after unlock. */
	if (status & INTSTAT_RCC) {
		mp = efe_recv(efep);
	}

	/* Receive queue empty: count the error and restart reception. */
	if (status & INTSTAT_RQE) {
		efep->efe_ierrors++;
		efep->efe_macrcv_errors++;

		/* Kick the receiver */
		PUTCSR(efep, CSR_COMMAND, COMMAND_RXQUEUED);
	}

	/* Transmit complete: reclaim descriptors under the tx lock. */
	if (status & INTSTAT_TXC) {
		mutex_enter(&efep->efe_txlock);

		efe_send_done(efep);

		mutex_exit(&efep->efe_txlock);
	}

	if (status & INTSTAT_FATAL) {
		mutex_enter(&efep->efe_txlock);

		efe_error(efep->efe_dip, "bus error; resetting!");
		efe_restart(efep);

		mutex_exit(&efep->efe_txlock);
	}

	mutex_exit(&efep->efe_intrlock);

	/*
	 * Upcalls into the MAC/MII layers happen here, with no driver
	 * locks held, to avoid lock-ordering problems with re-entry.
	 */
	if (mp != NULL) {
		mac_rx(efep->efe_mh, NULL, mp);
	}

	if (status & INTSTAT_TXC) {
		mac_tx_update(efep->efe_mh);
	}

	if (status & INTSTAT_FATAL) {
		mii_reset(efep->efe_miih);
	}

	return (DDI_INTR_CLAIMED);
}
/*
 * ghd_target_init()
 *
 * Allocates and initializes a per-target-instance structure (gtgt_t)
 * with tgt_private_size bytes of caller-private space appended, then
 * links it onto the per-device (target,lun) structure — creating that
 * gdev_t first if this is the first instance for the device.
 *
 * Both the HBA and waitq mutexes are held while the lists are updated;
 * ghd_waitq_process_and_mutex_exit() drops them on the way out.
 *
 * Returns the newly allocated gtgt_t (KM_SLEEP, so never NULL).
 */
/*ARGSUSED*/
gtgt_t *
ghd_target_init(dev_info_t	*hba_dip,
		dev_info_t	*tgt_dip,
		ccc_t		*cccp,
		size_t		 tgt_private_size,
		void		*hba_private,
		ushort_t	 target,
		uchar_t		 lun)
{
	_NOTE(ARGUNUSED(hba_dip))
	gtgt_t	*gtgtp;
	/* One allocation covers the gtgt_t plus the private area. */
	size_t	 size = sizeof (*gtgtp) + tgt_private_size;
	gdev_t	*gdevp;
	ulong_t	 maxactive;

	gtgtp = kmem_zalloc(size, KM_SLEEP);

	/*
	 * initialize the per instance structure
	 */

	/* The private area lives immediately after the gtgt_t. */
	gtgtp->gt_tgt_private = (void *)(gtgtp + 1);
	gtgtp->gt_size = size;
	gtgtp->gt_hba_private = hba_private;
	gtgtp->gt_target = target;
	gtgtp->gt_lun = lun;
	gtgtp->gt_ccc = cccp;

	/*
	 * set the queue's maxactive to 1 if
	 * property not specified on target or hba devinfo node
	 * (NOTE(review): only tgt_dip is queried here and hba_dip is
	 * marked unused — the "or hba" part of this comment may be
	 * stale; confirm.)
	 */
	maxactive = ddi_getprop(DDI_DEV_T_ANY, tgt_dip, 0, "ghd-maxactive", 1);
	gtgtp->gt_maxactive = maxactive;

	/* initialize the linked list pointers */
	GTGT_INIT(gtgtp);

	/*
	 * grab both mutexes so the queue structures
	 * stay stable while adding this instance to the linked lists
	 */
	mutex_enter(&cccp->ccc_hba_mutex);
	mutex_enter(&cccp->ccc_waitq_mutex);

	/*
	 * Search the HBA's linked list of device structures.
	 *
	 * If this device is already attached then link this instance
	 * to the existing per-device-structure on the ccc_devs list.
	 *
	 */
	gdevp = CCCP2GDEVP(cccp);
	while (gdevp != NULL) {
		if (gdevp->gd_target == target && gdevp->gd_lun == lun) {
			GDBG_WAITQ(("ghd_target_init(%d,%d) found gdevp 0x%p"
			    " gtgtp 0x%p max %lu\n", target, lun,
			    (void *)gdevp, (void *)gtgtp, maxactive));

			goto foundit;
		}
		gdevp = GDEV_NEXTP(gdevp);
	}

	/*
	 * Not found. This is the first instance for this device.
	 */


	/* allocate the per-device-structure */

	gdevp = kmem_zalloc(sizeof (*gdevp), KM_SLEEP);
	gdevp->gd_target = target;
	gdevp->gd_lun = lun;

	/*
	 * link this second level queue to the HBA's first
	 * level queue
	 */
	GDEV_QATTACH(gdevp, cccp, maxactive);

	GDBG_WAITQ(("ghd_target_init(%d,%d) new gdevp 0x%p gtgtp 0x%p"
	    " max %lu\n", target, lun, (void *)gdevp, (void *)gtgtp,
	    maxactive));

foundit:

	/* save the ptr to the per device structure */
	gtgtp->gt_gdevp = gdevp;

	/* Add the per instance structure to the per device list  */
	GTGT_ATTACH(gtgtp, gdevp);

	/* Releases both ccc_hba_mutex and ccc_waitq_mutex. */
	ghd_waitq_process_and_mutex_exit(cccp);

	return (gtgtp);
}
/*
 * tavor_agent_request_cb()
 *    Context: Called from the IBMF context
 *
 * IBMF receive callback for an agent.  Packages the incoming message
 * and agent-list entry into a heap-allocated argument block and
 * dispatches tavor_agent_handle_req() onto the agents task queue.
 *
 * Error contract: on ANY failure path (allocation or dispatch) the
 * IBMF message must still be freed exactly once via ibmf_free_msg();
 * on the success path, ownership of msgp and cb_args passes to the
 * task queue handler.
 */
static void
tavor_agent_request_cb(ibmf_handle_t ibmf_handle, ibmf_msg_t *msgp,
    void *args)
{
	tavor_agent_handler_arg_t	*cb_args;
	tavor_agent_list_t		*curr;
	tavor_state_t			*state;
	int				status;
	int				ibmf_status;

	TAVOR_TNF_ENTER(tavor_agent_request_cb);

	/* args is the agent-list entry registered with IBMF. */
	curr  = (tavor_agent_list_t *)args;
	state = curr->agl_state;

	/*
	 * Allocate space to hold the callback args (for passing to the
	 * task queue).  Note: If we are unable to allocate space for the
	 * the callback args here, then we just return.  But we must ensure
	 * that we call ibmf_free_msg() to free up the message.
	 */
	cb_args = (tavor_agent_handler_arg_t *)kmem_zalloc(
	    sizeof (tavor_agent_handler_arg_t), KM_NOSLEEP);
	if (cb_args == NULL) {
		ibmf_status = ibmf_free_msg(ibmf_handle, &msgp);
		if (ibmf_status != IBMF_SUCCESS) {
			TNF_PROBE_1(tavor_agent_request_cb_ibmf_free_msg_fail,
			    TAVOR_TNF_ERROR, "", tnf_uint, ibmf_status,
			    ibmf_status);
		}
		TNF_PROBE_0(tavor_agent_request_cb_kma_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_agent_request_cb);
		return;
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cb_args))

	/* Fill in the callback args */
	cb_args->ahd_ibmfhdl	= ibmf_handle;
	cb_args->ahd_ibmfmsg	= msgp;
	cb_args->ahd_agentlist	= args;

	/*
	 * Dispatch the message to the task queue.  Note: Just like above,
	 * if this request fails for any reason then make sure to free up
	 * the IBMF message and then return
	 */
	status = ddi_taskq_dispatch(state->ts_taskq_agents,
	    tavor_agent_handle_req, cb_args, DDI_NOSLEEP);
	if (status == DDI_FAILURE) {
		/* Dispatch failed: release both the args and the message. */
		kmem_free(cb_args, sizeof (tavor_agent_handler_arg_t));
		ibmf_status = ibmf_free_msg(ibmf_handle, &msgp);
		if (ibmf_status != IBMF_SUCCESS) {
			TNF_PROBE_1(tavor_agent_request_cb_ibmf_free_msg_fail,
			    TAVOR_TNF_ERROR, "", tnf_uint, ibmf_status,
			    ibmf_status);
		}
		TNF_PROBE_0(tavor_agent_request_cb_taskq_fail,
		    TAVOR_TNF_ERROR, "");
	}
	TAVOR_TNF_EXIT(tavor_agent_request_cb);
}
Example #7
0
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
siena_nic_fini(
	__in		efx_nic_t *enp)
{
	/* No per-NIC teardown is required for Siena; intentionally empty. */
	_NOTE(ARGUNUSED(enp))
}

			void
siena_nic_unprobe(
	__in		efx_nic_t *enp)
{
#if EFSYS_OPT_MON_STATS
	/* Free the monitor-statistics config (presumably set up during */
	/* probe — confirm against siena_nic_probe). */
	mcdi_mon_cfg_free(enp);
#endif /* EFSYS_OPT_MON_STATS */
	/* Tell the MC firmware this driver instance is detaching. */
	(void) efx_mcdi_drv_attach(enp, B_FALSE);
}

#if EFSYS_OPT_DIAG

static efx_register_set_t __siena_registers[] = {