Example #1
static void
fipe_ioat_cancel(void)
{
	uint32_t status;
	uint8_t	*addr = fipe_ioat_ctrl.ioat_reg_addr;
	ddi_acc_handle_t handle = fipe_ioat_ctrl.ioat_reg_handle;

	/*
	 * Reset channel. Sometimes reset is not reliable,
	 * so check completion or abort status after reset.
	 */
	/* LINTED: constant in conditional context */
	while (1) {
		/* Issue reset channel command. */
		ddi_put8(handle, (uint8_t *)(addr + FIPE_IOAT_CHAN_CMD), 0x20);

		/* Query command status. */
		status = ddi_get32(handle,
		    (uint32_t *)(addr + FIPE_IOAT_CHAN_STS_LO));
		if (status & 0x1) {
			/* Reset channel completed. */
			break;
		} else {
			SMT_PAUSE();
		}
	}

	/* Put channel into "not in use" state. */
	ddi_put16(handle, (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL), 0);
}
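The loop above retries indefinitely if the hardware never reports completion. As a hedged sketch only — the retry limit is invented here and fipe_ioat_cancel_bounded is a hypothetical name, while the FIPE_IOAT_* offsets and fipe_ioat_ctrl fields are reused from the example — a bounded variant could look like this:

static int
fipe_ioat_cancel_bounded(void)
{
	uint32_t status;
	int retry;
	uint8_t	*addr = fipe_ioat_ctrl.ioat_reg_addr;
	ddi_acc_handle_t handle = fipe_ioat_ctrl.ioat_reg_handle;

	for (retry = 0; retry < 1000; retry++) {
		/* Issue reset channel command, then poll the completion bit. */
		ddi_put8(handle, (uint8_t *)(addr + FIPE_IOAT_CHAN_CMD), 0x20);
		status = ddi_get32(handle,
		    (uint32_t *)(addr + FIPE_IOAT_CHAN_STS_LO));
		if (status & 0x1)
			break;
		SMT_PAUSE();
	}
	if (retry == 1000)
		return (-1);	/* reset never reported completion */

	/* Put channel into "not in use" state. */
	ddi_put16(handle, (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL), 0);
	return (0);
}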
Example #2
static void
rge_chip_peek_reg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	uint64_t regval = 0;	/* avoid returning garbage on an unexpected size */
	void *regaddr;

	RGE_TRACE(("rge_chip_peek_reg($%p, $%p)",
	    (void *)rgep, (void *)ppd));

	regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);

	switch (ppd->pp_acc_size) {
	case 1:
		regval = ddi_get8(rgep->io_handle, regaddr);
		break;

	case 2:
		regval = ddi_get16(rgep->io_handle, regaddr);
		break;

	case 4:
		regval = ddi_get32(rgep->io_handle, regaddr);
		break;

	case 8:
		regval = ddi_get64(rgep->io_handle, regaddr);
		break;
	}

	ppd->pp_acc_data = regval;
}
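The peek path above has a natural write counterpart using ddi_put8/16/32/64. The following is only a sketch of what such a poke routine could look like, reusing the ppd fields and the PIO_ADDR macro from the example; it is not claimed to match the driver's actual implementation:

static void
rge_chip_poke_reg(rge_t *rgep, rge_peekpoke_t *ppd)
{
	void *regaddr = PIO_ADDR(rgep, ppd->pp_acc_offset);
	uint64_t regval = ppd->pp_acc_data;

	switch (ppd->pp_acc_size) {
	case 1:
		ddi_put8(rgep->io_handle, regaddr, (uint8_t)regval);
		break;

	case 2:
		ddi_put16(rgep->io_handle, regaddr, (uint16_t)regval);
		break;

	case 4:
		ddi_put32(rgep->io_handle, regaddr, (uint32_t)regval);
		break;

	case 8:
		ddi_put64(rgep->io_handle, regaddr, regval);
		break;
	}
}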
Example #3
/**
 * Get host supported features.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 *
 * @return Mask of host features.
 */
static uint32_t VirtioPciGetFeatures(PVIRTIODEVICE pDevice)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciGetFeatures pDevice=%p\n", pDevice));
    virtio_pci_t *pPciData = pDevice->pvHyper;
    AssertReturn(pPciData, 0);

    return ddi_get32(pPciData->hIO, (uint32_t *)(pPciData->addrIOBase + VIRTIO_PCI_HOST_FEATURES));
}
Example #4
static uint32_t
rge_reg_get32(rge_t *rgep, uintptr_t regno)
{
	RGE_TRACE(("rge_reg_get32($%p, 0x%lx)",
	    (void *)rgep, regno));

	return (ddi_get32(rgep->io_handle, REG32(rgep, regno)));
}
Example #5
uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
}
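Narrower config-space reads follow the same pattern with ddi_get8 and ddi_get16. A sketch, assuming helper names analogous to the 4-byte variant above (they may not match the original driver exactly):

uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return (ddi_get8(sc->sc_ioh,
	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return (ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
}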
Example #6
uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
{
	uint64_t r;

	ASSERT(sc->sc_config_offset);
	r = ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
	    index + sizeof (uint32_t)));

	r <<= 32;

	r += ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
	return (r);
}
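Because the 64-bit value is assembled from two independent 32-bit reads (high word first, then low), a field the device updates concurrently could be observed torn. Where that matters, one common mitigation is to re-read the high half until it is stable. A sketch under that assumption; the function name is hypothetical and is not part of the original driver:

uint64_t
virtio_read_device_config_8_stable(struct virtio_softc *sc, unsigned int index)
{
	uint32_t hi, lo, hi2;

	ASSERT(sc->sc_config_offset);
	do {
		hi = ddi_get32(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
		    index + sizeof (uint32_t)));
		lo = ddi_get32(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
		    index));
		hi2 = ddi_get32(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
		    index + sizeof (uint32_t)));
	} while (hi != hi2);

	return (((uint64_t)hi << 32) | lo);
}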
Example #7
uint32_t
pci_config_get32(ddi_acc_handle_t handle, off_t offset)
{
	caddr_t	cfgaddr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(handle);
	cfgaddr = hp->ah_addr + offset;
	return (ddi_get32(handle, (uint32_t *)cfgaddr));
}
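A typical caller brackets such reads with pci_config_setup() and pci_config_teardown(). A minimal usage sketch; xx_read_ids is a hypothetical helper, not part of any driver shown here:

static int
xx_read_ids(dev_info_t *dip)
{
	ddi_acc_handle_t cfg;
	uint32_t ids, bar0;

	if (pci_config_setup(dip, &cfg) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Offset 0 holds the vendor ID (low 16 bits) and device ID. */
	ids = pci_config_get32(cfg, PCI_CONF_VENID);
	bar0 = pci_config_get32(cfg, PCI_CONF_BASE0);
	cmn_err(CE_CONT, "?vendor/device=0x%x bar0=0x%x\n", ids, bar0);

	pci_config_teardown(&cfg);
	return (DDI_SUCCESS);
}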
Example #8
/* Implements functions to read and write Quantis registers */
quantis_register_value quantis_reg_get(quantis_pci_device* qdev,
                                       quantis_register reg)
{
  char msg[MAX_MSG_LEN];
  LOG_DEBUG1("In quantis_reg_get with reg=%d\n", reg);
  if (reg % 4 !=  0)
  {
    snprintf(msg,
             MAX_MSG_LEN,
             "Offset (%d) in the registers array is not divisible by 4. This could crash the driver.\n",
             reg);
    QUANTIS_WARN(msg);
  }
  return (quantis_register_value)ddi_get32(qdev->regs_handle,
                                           (quantis_register_value *)(qdev->regs + reg));
}
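The matching store side would use ddi_put32 with the same handle and register base. A sketch with assumed names (quantis_reg_set / quantis_reg_set_bits), not necessarily those of the real driver:

void
quantis_reg_set(quantis_pci_device* qdev, quantis_register reg,
                quantis_register_value value)
{
  ddi_put32(qdev->regs_handle,
            (quantis_register_value *)(qdev->regs + reg),
            value);
}

void
quantis_reg_set_bits(quantis_pci_device* qdev, quantis_register reg,
                     quantis_register_value bits)
{
  quantis_register_value val = quantis_reg_get(qdev, reg);
  quantis_reg_set(qdev, reg, val | bits);
}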
Example #9
static int
fipe_ioat_trigger(void)
{
	uint16_t ctrl;
	uint32_t err;
	uint8_t	*addr = fipe_ioat_ctrl.ioat_reg_addr;
	ddi_acc_handle_t handle = fipe_ioat_ctrl.ioat_reg_handle;

	/* Check channel in use flag. */
	ctrl = ddi_get16(handle, (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL));
	if (ctrl & 0x100) {
		/*
		 * Channel is in use by somebody else. IOAT driver may have
		 * been loaded, forbid fipe from accessing IOAT hardware
		 * anymore.
		 */
		fipe_ioat_ctrl.ioat_ready = B_FALSE;
		fipe_ioat_ctrl.ioat_failed = B_TRUE;
		FIPE_KSTAT_INC(ioat_start_fail_cnt);
		return (-1);
	} else {
		/* Set channel in use flag. */
		ddi_put16(handle,
		    (uint16_t *)(addr + FIPE_IOAT_CHAN_CTRL), 0x100);
	}

	/* Write command address. */
	ddi_put32(handle,
	    (uint32_t *)(addr + FIPE_IOAT_CHAN_ADDR_LO),
	    (uint32_t)fipe_ioat_ctrl.ioat_cmd_physaddr);
	ddi_put32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ADDR_HI),
	    (uint32_t)(fipe_ioat_ctrl.ioat_cmd_physaddr >> 32));

	/* Check and clear error flags. */
	err = ddi_get32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ERR));
	if (err != 0) {
		ddi_put32(handle, (uint32_t *)(addr + FIPE_IOAT_CHAN_ERR), err);
	}

	/* Start channel. */
	ddi_put8(handle, (uint8_t *)(addr + FIPE_IOAT_CHAN_CMD), 0x1);

	return (0);
}
Example #10
/*
 * Negotiate features, save the result in sc->sc_features
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t host_features;
	uint32_t features;

	host_features = ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));

	dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
	    host_features, guest_features);

	features = host_features & guest_features;
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
	    features);

	sc->sc_features = features;

	return (host_features);
}
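A caller passes the feature bits it is prepared to support and can compare the negotiated sc->sc_features against them afterwards. A usage sketch; VIRTIO_EXAMPLE_FEATURES and xx_negotiate are placeholders, and dev_debug is the same driver-local logging macro used above:

static void
xx_negotiate(struct virtio_softc *sc)
{
	uint32_t host;

	/* VIRTIO_EXAMPLE_FEATURES stands in for the driver's feature mask. */
	host = virtio_negotiate_features(sc, VIRTIO_EXAMPLE_FEATURES);
	dev_debug(sc->sc_dev, CE_NOTE,
	    "host offered %x, negotiated %x", host, sc->sc_features);
}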
Example #11
static int
mptsas_raidvol_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidVolPage0_t raidpage;
	int rval = DDI_SUCCESS, i;
	mptsas_raidvol_t *raidvol;
	uint8_t	numdisks, volstate, voltype, physdisknum;
	uint32_t volsetting;
	uint32_t statusflags, resync_flag;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return (DDI_FAILURE);

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidvol_page0_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}

	raidvol = va_arg(ap,  mptsas_raidvol_t *);

	raidpage = (pMpi2RaidVolPage0_t)page_memp;
	volstate = ddi_get8(accessp, &raidpage->VolumeState);
	volsetting = ddi_get32(accessp,
	    (uint32_t *)(void *)&raidpage->VolumeSettings);
	statusflags = ddi_get32(accessp, &raidpage->VolumeStatusFlags);
	voltype = ddi_get8(accessp, &raidpage->VolumeType);

	raidvol->m_state = volstate;
	raidvol->m_statusflags = statusflags;
	/*
	 * Volume size is not used right now. Set to 0.
	 */
	raidvol->m_raidsize = 0;
	raidvol->m_settings = volsetting;
	raidvol->m_raidlevel = voltype;

	if (statusflags & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED) {
		mptsas_log(mpt, CE_NOTE, "?Volume %d is quiesced\n",
		    raidvol->m_raidhandle);
	}

	if (statusflags &
	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
		mptsas_log(mpt, CE_NOTE, "?Volume %d is resyncing\n",
		    raidvol->m_raidhandle);
	}

	resync_flag = MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	switch (volstate) {
	case MPI2_RAID_VOL_STATE_OPTIMAL:
		mptsas_log(mpt, CE_NOTE, "?Volume %d is "
		    "optimal\n", raidvol->m_raidhandle);
		break;
	case MPI2_RAID_VOL_STATE_DEGRADED:
		if ((statusflags & resync_flag) == 0) {
			mptsas_log(mpt, CE_WARN, "Volume %d "
			    "is degraded\n",
			    raidvol->m_raidhandle);
		}
		break;
	case MPI2_RAID_VOL_STATE_FAILED:
		mptsas_log(mpt, CE_WARN, "Volume %d is "
		    "failed\n", raidvol->m_raidhandle);
		break;
	case MPI2_RAID_VOL_STATE_MISSING:
		mptsas_log(mpt, CE_WARN, "Volume %d is "
		    "missing\n", raidvol->m_raidhandle);
		break;
	default:
		break;
	}
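	/*
	 * Note that NumPhysDisks and PhysDiskNum below are read by direct
	 * dereference rather than through ddi_get8() on the access handle,
	 * unlike the fields fetched above.
	 */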
	numdisks = raidpage->NumPhysDisks;
	raidvol->m_ndisks = numdisks;
	for (i = 0; i < numdisks; i++) {
		physdisknum = raidpage->PhysDisk[i].PhysDiskNum;
		raidvol->m_disknum[i] = physdisknum;
		if (mptsas_get_physdisk_settings(mpt, raidvol,
		    physdisknum))
			break;
	}
	return (rval);
}
Example #12
uint32_t
e1000_read_reg(struct e1000_hw *hw, uint32_t offset)
{
    return (ddi_get32(((struct e1000g_osdep *)(hw)->back)->reg_handle,
                      (uint32_t *)((uintptr_t)(hw)->hw_addr + offset)));
}
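The corresponding write accessor would mirror this with ddi_put32; a sketch only, as the name e1000_write_reg and its exact shape are assumptions here:

void
e1000_write_reg(struct e1000_hw *hw, uint32_t offset, uint32_t value)
{
    ddi_put32(((struct e1000g_osdep *)(hw)->back)->reg_handle,
              (uint32_t *)((uintptr_t)(hw)->hw_addr + offset), value);
}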
Example #13
uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{
    /* LINTED: E_BAD_PTR_CAST_ALIGN */
    return (ddi_get32(sc->regh, (uint32_t *)(sc->regp + reg)));
}
Example #14
/*
 * RAID Action for System Shutdown. This request uses the dedicated TM slot to
 * avoid a call to mptsas_save_cmd.  Since Solaris requires that the mutex is
 * not held during the mptsas_quiesce function, this RAID action must not use
 * the normal code path of requests and replies.
 */
void
mptsas_raid_action_system_shutdown(mptsas_t *mpt)
{
	pMpi2RaidActionRequest_t	action;
	uint8_t				ir_active = FALSE, reply_type;
	uint8_t				function, found_reply = FALSE;
	uint16_t			SMID, action_type;
	mptsas_slots_t			*slots = mpt->m_active;
	int				config, vol;
	mptsas_cmd_t			*cmd;
	uint32_t			request_desc_low, reply_addr;
	int				cnt;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	pMPI2DefaultReply_t		reply;
	pMpi2AddressReplyDescriptor_t	address_reply;

	/*
	 * Before doing the system shutdown RAID Action, make sure that the IOC
	 * supports IR and make sure there is a valid volume for the request.
	 */
	if (mpt->m_ir_capable) {
		for (config = 0; (config < slots->m_num_raid_configs) &&
		    (!ir_active); config++) {
			for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
				if (slots->m_raidconfig[config].m_raidvol[vol].
				    m_israid) {
					ir_active = TRUE;
					break;
				}
			}
		}
	}
	if (!ir_active) {
		return;
	}

	/*
	 * If TM slot is already being used (highly unlikely), show message and
	 * don't issue the RAID action.
	 */
	if (slots->m_slot[MPTSAS_TM_SLOT(mpt)] != NULL) {
		mptsas_log(mpt, CE_WARN, "RAID Action slot in use.  Cancelling"
		    " System Shutdown RAID Action.\n");
		return;
	}

	/*
	 * Create the cmd and put it in the dedicated TM slot.
	 */
	cmd = &(mpt->m_event_task_mgmt.m_event_cmd);
	bzero((caddr_t)cmd, sizeof (*cmd));
	cmd->cmd_pkt = NULL;
	cmd->cmd_slot = MPTSAS_TM_SLOT(mpt);
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = cmd;

	/*
	 * Form message for raid action.
	 */
	action = (pMpi2RaidActionRequest_t)(mpt->m_req_frame +
	    (mpt->m_req_frame_size * cmd->cmd_slot));
	bzero(action, mpt->m_req_frame_size);
	action->Function = MPI2_FUNCTION_RAID_ACTION;
	action->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/*
	 * Send RAID Action.
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);

	/*
	 * Even though reply does not matter because the system is shutting
	 * down, wait no more than 5 seconds here to get the reply just because
	 * we don't want to leave it hanging if it's coming.  Poll because
	 * interrupts are disabled when this function is called.
	 */
	for (cnt = 0; cnt < 5000; cnt++) {
		/*
		 * Check for a reply.
		 */
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * There is a reply.  If it's not an address reply, ignore it.
		 */
		reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Default.ReplyFlags);
		reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_type != MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			goto clear_and_continue;
		}

		/*
		 * SMID must be the TM slot since that's what we're using for
		 * this RAID action.  If not, ignore this reply.
		 */
		address_reply =
		    (pMpi2AddressReplyDescriptor_t)reply_desc_union;
		SMID = ddi_get16(mpt->m_acc_post_queue_hdl,
		    &address_reply->SMID);
		if (SMID != MPTSAS_TM_SLOT(mpt)) {
			goto clear_and_continue;
		}

		/*
		 * If reply frame is not in the proper range ignore it.
		 */
		reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
		    &address_reply->ReplyFrameAddress);
		if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
		    (reply_addr >= (mpt->m_reply_frame_dma_addr +
		    (mpt->m_reply_frame_size * mpt->m_free_queue_depth))) ||
		    ((reply_addr - mpt->m_reply_frame_dma_addr) %
		    mpt->m_reply_frame_size != 0)) {
			goto clear_and_continue;
		}

		/*
		 * If not a RAID action reply ignore it.
		 */
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame +
		    (reply_addr - mpt->m_reply_frame_dma_addr));
		function = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &reply->Function);
		if (function != MPI2_FUNCTION_RAID_ACTION) {
			goto clear_and_continue;
		}

		/*
		 * Finally, make sure this is the System Shutdown RAID action.
		 * If not, ignore reply.
		 */
		action_type = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->FunctionDependent1);
		if (action_type !=
		    MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED) {
			goto clear_and_continue;
		}
		found_reply = TRUE;

clear_and_continue:
		/*
		 * Clear the reply descriptor for re-use and increment index.
		 */
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
		    0xFFFFFFFFFFFFFFFF);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		/*
		 * Update the global reply index and keep looking for the
		 * reply if not found yet.
		 */
		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyPostHostIndex,
		    mpt->m_post_index);
		if (!found_reply) {
			continue;
		}

		break;
	}

	/*
	 * clear the used slot as the last step.
	 */
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = NULL;
}
Example #15
void
pci_dump(void *arg)
{
	igb_t *igb = (igb_t *)arg;
	ddi_acc_handle_t handle;
	uint8_t cap_ptr;
	uint8_t next_ptr;
	uint32_t msix_bar;
	uint32_t msix_ctrl;
	uint32_t msix_tbl_sz;
	uint32_t tbl_offset;
	uint32_t tbl_bir;
	uint32_t pba_offset;
	uint32_t pba_bir;
	off_t offset;
	off_t mem_size;
	uintptr_t base;
	ddi_acc_handle_t acc_hdl;
	int i;

	handle = igb->osdep.cfg_handle;

	igb_log(igb, "Begin dump PCI config space");

	igb_log(igb,
	    "PCI_CONF_VENID:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_VENID));
	igb_log(igb,
	    "PCI_CONF_DEVID:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_DEVID));
	igb_log(igb,
	    "PCI_CONF_COMMAND:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_COMM));
	igb_log(igb,
	    "PCI_CONF_STATUS:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_STAT));
	igb_log(igb,
	    "PCI_CONF_REVID:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_REVID));
	igb_log(igb,
	    "PCI_CONF_PROG_CLASS:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_PROGCLASS));
	igb_log(igb,
	    "PCI_CONF_SUB_CLASS:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_SUBCLASS));
	igb_log(igb,
	    "PCI_CONF_BAS_CLASS:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_BASCLASS));
	igb_log(igb,
	    "PCI_CONF_CACHE_LINESZ:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_CACHE_LINESZ));
	igb_log(igb,
	    "PCI_CONF_LATENCY_TIMER:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_LATENCY_TIMER));
	igb_log(igb,
	    "PCI_CONF_HEADER_TYPE:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_HEADER));
	igb_log(igb,
	    "PCI_CONF_BIST:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_BIST));
	igb_log(igb,
	    "PCI_CONF_BASE0:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_BASE0));
	igb_log(igb,
	    "PCI_CONF_BASE1:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_BASE1));
	igb_log(igb,
	    "PCI_CONF_BASE2:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_BASE2));

	/* MSI-X BAR */
	msix_bar = pci_config_get32(handle, PCI_CONF_BASE3);
	igb_log(igb,
	    "PCI_CONF_BASE3:\t0x%x\n", msix_bar);

	igb_log(igb,
	    "PCI_CONF_BASE4:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_BASE4));
	igb_log(igb,
	    "PCI_CONF_BASE5:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_BASE5));
	igb_log(igb,
	    "PCI_CONF_CIS:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_CIS));
	igb_log(igb,
	    "PCI_CONF_SUBVENID:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_SUBVENID));
	igb_log(igb,
	    "PCI_CONF_SUBSYSID:\t0x%x\n",
	    pci_config_get16(handle, PCI_CONF_SUBSYSID));
	igb_log(igb,
	    "PCI_CONF_ROM:\t0x%x\n",
	    pci_config_get32(handle, PCI_CONF_ROM));

	cap_ptr = pci_config_get8(handle, PCI_CONF_CAP_PTR);

	igb_log(igb,
	    "PCI_CONF_CAP_PTR:\t0x%x\n", cap_ptr);
	igb_log(igb,
	    "PCI_CONF_ILINE:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_ILINE));
	igb_log(igb,
	    "PCI_CONF_IPIN:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_IPIN));
	igb_log(igb,
	    "PCI_CONF_MIN_G:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_MIN_G));
	igb_log(igb,
	    "PCI_CONF_MAX_L:\t0x%x\n",
	    pci_config_get8(handle, PCI_CONF_MAX_L));

	/* Power Management */
	offset = cap_ptr;

	igb_log(igb,
	    "PCI_PM_CAP_ID:\t0x%x\n",
	    pci_config_get8(handle, offset));

	next_ptr = pci_config_get8(handle, offset + 1);

	igb_log(igb,
	    "PCI_PM_NEXT_PTR:\t0x%x\n", next_ptr);
	igb_log(igb,
	    "PCI_PM_CAP:\t0x%x\n",
	    pci_config_get16(handle, offset + PCI_PMCAP));
	igb_log(igb,
	    "PCI_PM_CSR:\t0x%x\n",
	    pci_config_get16(handle, offset + PCI_PMCSR));
	igb_log(igb,
	    "PCI_PM_CSR_BSE:\t0x%x\n",
	    pci_config_get8(handle, offset + PCI_PMCSR_BSE));
	igb_log(igb,
	    "PCI_PM_DATA:\t0x%x\n",
	    pci_config_get8(handle, offset + PCI_PMDATA));

	/* MSI Configuration */
	offset = next_ptr;

	igb_log(igb,
	    "PCI_MSI_CAP_ID:\t0x%x\n",
	    pci_config_get8(handle, offset));

	next_ptr = pci_config_get8(handle, offset + 1);

	igb_log(igb,
	    "PCI_MSI_NEXT_PTR:\t0x%x\n", next_ptr);
	igb_log(igb,
	    "PCI_MSI_CTRL:\t0x%x\n",
	    pci_config_get16(handle, offset + PCI_MSI_CTRL));
	igb_log(igb,
	    "PCI_MSI_ADDR:\t0x%x\n",
	    pci_config_get32(handle, offset + PCI_MSI_ADDR_OFFSET));
	igb_log(igb,
	    "PCI_MSI_ADDR_HI:\t0x%x\n",
	    pci_config_get32(handle, offset + 0x8));
	igb_log(igb,
	    "PCI_MSI_DATA:\t0x%x\n",
	    pci_config_get16(handle, offset + 0xC));

	/* MSI-X Configuration */
	offset = next_ptr;

	igb_log(igb,
	    "PCI_MSIX_CAP_ID:\t0x%x\n",
	    pci_config_get8(handle, offset));

	next_ptr = pci_config_get8(handle, offset + 1);
	igb_log(igb,
	    "PCI_MSIX_NEXT_PTR:\t0x%x\n", next_ptr);

	msix_ctrl = pci_config_get16(handle, offset + PCI_MSIX_CTRL);
	msix_tbl_sz = msix_ctrl & 0x7ff;
	igb_log(igb,
	    "PCI_MSIX_CTRL:\t0x%x\n", msix_ctrl);

	tbl_offset = pci_config_get32(handle, offset + PCI_MSIX_TBL_OFFSET);
	tbl_bir = tbl_offset & PCI_MSIX_TBL_BIR_MASK;
	tbl_offset = tbl_offset & ~PCI_MSIX_TBL_BIR_MASK;
	igb_log(igb,
	    "PCI_MSIX_TBL_OFFSET:\t0x%x\n", tbl_offset);
	igb_log(igb,
	    "PCI_MSIX_TBL_BIR:\t0x%x\n", tbl_bir);

	pba_offset = pci_config_get32(handle, offset + PCI_MSIX_PBA_OFFSET);
	pba_bir = pba_offset & PCI_MSIX_PBA_BIR_MASK;
	pba_offset = pba_offset & ~PCI_MSIX_PBA_BIR_MASK;
	igb_log(igb,
	    "PCI_MSIX_PBA_OFFSET:\t0x%x\n", pba_offset);
	igb_log(igb,
	    "PCI_MSIX_PBA_BIR:\t0x%x\n", pba_bir);

	/* PCI Express Configuration */
	offset = next_ptr;

	igb_log(igb,
	    "PCIE_CAP_ID:\t0x%x\n",
	    pci_config_get8(handle, offset + PCIE_CAP_ID));

	next_ptr = pci_config_get8(handle, offset + PCIE_CAP_NEXT_PTR);

	igb_log(igb,
	    "PCIE_CAP_NEXT_PTR:\t0x%x\n", next_ptr);
	igb_log(igb,
	    "PCIE_PCIECAP:\t0x%x\n",
	    pci_config_get16(handle, offset + PCIE_PCIECAP));
	igb_log(igb,
	    "PCIE_DEVCAP:\t0x%x\n",
	    pci_config_get32(handle, offset + PCIE_DEVCAP));
	igb_log(igb,
	    "PCIE_DEVCTL:\t0x%x\n",
	    pci_config_get16(handle, offset + PCIE_DEVCTL));
	igb_log(igb,
	    "PCIE_DEVSTS:\t0x%x\n",
	    pci_config_get16(handle, offset + PCIE_DEVSTS));
	igb_log(igb,
	    "PCIE_LINKCAP:\t0x%x\n",
	    pci_config_get32(handle, offset + PCIE_LINKCAP));
	igb_log(igb,
	    "PCIE_LINKCTL:\t0x%x\n",
	    pci_config_get16(handle, offset + PCIE_LINKCTL));
	igb_log(igb,
	    "PCIE_LINKSTS:\t0x%x\n",
	    pci_config_get16(handle, offset + PCIE_LINKSTS));

	/* MSI-X Memory Space */
	if (ddi_dev_regsize(igb->dip, IGB_ADAPTER_MSIXTAB, &mem_size) !=
	    DDI_SUCCESS) {
		igb_log(igb, "ddi_dev_regsize() failed");
		return;
	}

	if ((ddi_regs_map_setup(igb->dip, IGB_ADAPTER_MSIXTAB, (caddr_t *)&base,
	    0, mem_size, &igb_regs_acc_attr, &acc_hdl)) != DDI_SUCCESS) {
		igb_log(igb, "ddi_regs_map_setup() failed");
		return;
	}

	igb_log(igb, "MSI-X Memory Space: (mem_size = %d, base = %x)",
	    mem_size, base);

	for (i = 0; i <= msix_tbl_sz; i++) {
		igb_log(igb, "MSI-X Table Entry(%d):", i);
		igb_log(igb, "lo_addr:\t%x",
		    ddi_get32(acc_hdl,
		    (uint32_t *)(base + tbl_offset + (i * 16))));
		igb_log(igb, "up_addr:\t%x",
		    ddi_get32(acc_hdl,
		    (uint32_t *)(base + tbl_offset + (i * 16) + 4)));
		igb_log(igb, "msg_data:\t%x",
		    ddi_get32(acc_hdl,
		    (uint32_t *)(base + tbl_offset + (i * 16) + 8)));
		igb_log(igb, "vct_ctrl:\t%x",
		    ddi_get32(acc_hdl,
		    (uint32_t *)(base + tbl_offset + (i * 16) + 12)));
	}

	igb_log(igb, "MSI-X Pending Bits:\t%x",
	    ddi_get32(acc_hdl, (uint32_t *)(base + pba_offset)));

	ddi_regs_map_free(&acc_hdl);
}
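The MSI-X table walk above reads four 32-bit words per entry at a 16-byte stride. For reference, the layout those reads assume can be pictured as the following struct (the type name is illustrative; the field order follows the PCI MSI-X definition):

typedef struct msix_tbl_entry {
	uint32_t	mte_addr_lo;	/* message address, low 32 bits */
	uint32_t	mte_addr_hi;	/* message address, high 32 bits */
	uint32_t	mte_msg_data;	/* message data */
	uint32_t	mte_vct_ctrl;	/* vector control; bit 0 masks the vector */
} msix_tbl_entry_t;	/* 16 bytes, matching the (i * 16) stride above */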
Example #16
static int
acebus_config(ebus_devstate_t *ebus_p)
{
	ddi_acc_handle_t conf_handle;
	uint16_t comm;
#ifdef	ACEBUS_HOTPLUG
	int tcr_reg;
	caddr_t csr_io;
	ddi_device_acc_attr_t csr_attr = {   /* CSR map attributes */
		DDI_DEVICE_ATTR_V0,
		DDI_STRUCTURE_LE_ACC,
		DDI_STRICTORDER_ACC
	};
	ddi_acc_handle_t csr_handle;
#endif

	/*
	 * Make sure the master enable and memory access enable
	 * bits are set in the config command register.
	 */
	if (pci_config_setup(ebus_p->dip, &conf_handle) != DDI_SUCCESS)
		return (0);

	comm = pci_config_get16(conf_handle, PCI_CONF_COMM),
#ifdef DEBUG
	    DBG1(D_ATTACH, ebus_p, "command register was 0x%x\n", comm);
#endif
	comm |= (PCI_COMM_ME|PCI_COMM_MAE|PCI_COMM_SERR_ENABLE|
	    PCI_COMM_PARITY_DETECT);
	pci_config_put16(conf_handle, PCI_CONF_COMM, comm),
#ifdef DEBUG
	    DBG1(D_MAP, ebus_p, "command register is now 0x%x\n",
	    pci_config_get16(conf_handle, PCI_CONF_COMM));
#endif
	pci_config_put8(conf_handle, PCI_CONF_CACHE_LINESZ,
	    (uchar_t)acebus_cache_line_size);
	pci_config_put8(conf_handle, PCI_CONF_LATENCY_TIMER,
	    (uchar_t)acebus_latency_timer);
	pci_config_teardown(&conf_handle);

#ifdef	ACEBUS_HOTPLUG
	if (acebus_update_props(ebus_p) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Could not update special properties.",
		    ddi_driver_name(ebus_p->dip),
		    ddi_get_instance(ebus_p->dip));
		return (0);
	}

	if (ddi_regs_map_setup(ebus_p->dip, CSR_IO_RINDEX,
	    (caddr_t *)&csr_io, 0, CSR_SIZE, &csr_attr,
	    &csr_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Could not map Ebus CSR.",
		    ddi_driver_name(ebus_p->dip),
		    ddi_get_instance(ebus_p->dip));
		return (0);
	}
#ifdef	DEBUG
	if (acebus_debug_flags) {
		DBG3(D_ATTACH, ebus_p, "tcr[123] = %x,%x,%x\n",
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR1_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR2_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR3_OFF)));
		DBG2(D_ATTACH, ebus_p, "pmd-aux=%x, freq-aux=%x\n",
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    PMD_AUX_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    FREQ_AUX_OFF)));
#ifdef ACEBUS_DEBUG
		for (comm = 0; comm < 4; comm++)
			prom_printf("dcsr%d=%x, dacr%d=%x, dbcr%d=%x\n", comm,
			    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm))), comm,
			    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm)+4)), comm,
			    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm)+8)));
#endif
	} /* acebus_debug_flags */
#endif
	/* If TCR registers are not initialized, initialize them here */
	tcr_reg = ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
	    TCR1_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle, (uint32_t *)((caddr_t)csr_io + TCR1_OFF),
		    TCR1_REGVAL);
	tcr_reg = ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
	    TCR2_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle, (uint32_t *)((caddr_t)csr_io + TCR2_OFF),
		    TCR2_REGVAL);
	tcr_reg = ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
	    TCR3_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle, (uint32_t *)((caddr_t)csr_io + TCR3_OFF),
		    TCR3_REGVAL);
#ifdef	DEBUG
	if (acebus_debug_flags) {
		DBG3(D_ATTACH, ebus_p, "wrote tcr[123] = %x,%x,%x\n",
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR1_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR2_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR3_OFF)));
	}
#endif

	ddi_regs_map_free(&csr_handle);
#endif	/* ACEBUS_HOTPLUG */
	return (1);	/* return success */
}
Example #17
uint32_t
ipw2200_csr_get32(struct ipw2200_softc *sc, uint32_t off)
{
	return (ddi_get32(sc->sc_ioh,
	    (uint32_t *)((uintptr_t)sc->sc_regs + off)));
}
Example #18
static int
mptsas_raidconf_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidConfigurationPage0_t	raidconfig_page0;
	pMpi2RaidConfig0ConfigElement_t	element;
	uint32_t *confignum;
	int rval = DDI_SUCCESS, i;
	uint8_t numelements, vol, disk;
	uint16_t elementtype, voldevhandle;
	uint16_t etype_vol, etype_pd, etype_hs;
	uint16_t etype_oce;
	mptsas_slots_t *slots = mpt->m_active;
	m_raidconfig_t *raidconfig;
	uint64_t raidwwn;
	uint32_t native;
	mptsas_target_t	*ptgt;
	uint32_t configindex;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) {
		return (DDI_FAILURE);
	}

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_get_raid_conf_page0 "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	confignum = va_arg(ap,  uint32_t *);
	configindex = va_arg(ap, uint32_t);
	raidconfig_page0 = (pMpi2RaidConfigurationPage0_t)page_memp;
	/*
	 * Get all RAID configurations.
	 */
	etype_vol = MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT;
	etype_pd = MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT;
	etype_hs = MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT;
	etype_oce = MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT;
	/*
	 * Set up page address for next time through.
	 */
	*confignum =  ddi_get8(accessp,
	    &raidconfig_page0->ConfigNum);

	/*
	 * Point to the right config in the structure.
	 * Increment the number of valid RAID configs.
	 */
	raidconfig = &slots->m_raidconfig[configindex];
	slots->m_num_raid_configs++;

	/*
	 * Set the native flag if this is not a foreign
	 * configuration.
	 */
	native = ddi_get32(accessp, &raidconfig_page0->Flags);
	if (native & MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG) {
		native = FALSE;
	} else {
		native = TRUE;
	}
	raidconfig->m_native = (uint8_t)native;

	/*
	 * Get volume information for the volumes in the
	 * config.
	 */
	numelements = ddi_get8(accessp, &raidconfig_page0->NumElements);
	vol = 0;
	disk = 0;
	element = (pMpi2RaidConfig0ConfigElement_t)
	    &raidconfig_page0->ConfigElement;

	for (i = 0; ((i < numelements) && native); i++, element++) {
		/*
		 * Get the element type.  Could be Volume,
		 * PhysDisk, Hot Spare, or Online Capacity
		 * Expansion PhysDisk.
		 */
		elementtype = ddi_get16(accessp, &element->ElementFlags);
		elementtype &= MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;

		/*
		 * For volumes, get the RAID settings and the
		 * WWID.
		 */
		if (elementtype == etype_vol) {
			voldevhandle = ddi_get16(accessp,
			    &element->VolDevHandle);
			raidconfig->m_raidvol[vol].m_israid = 1;
			raidconfig->m_raidvol[vol].
			    m_raidhandle = voldevhandle;
			/*
			 * Get the settings for the raid
			 * volume.  This includes the
			 * DevHandles for the disks making up
			 * the raid volume.
			 */
			if (mptsas_get_raid_settings(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			/*
			 * Get the WWID of the RAID volume for
			 * SAS HBA
			 */
			if (mptsas_get_raid_wwid(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			raidwwn = raidconfig->m_raidvol[vol].
			    m_raidwwid;

			/*
			 * RAID uses phymask of 0.
			 */
			ptgt = mptsas_tgt_alloc(&slots->m_tgttbl,
			    voldevhandle, raidwwn, 0, 0, 0, mpt);

			raidconfig->m_raidvol[vol].m_raidtgt =
			    ptgt;

			/*
			 * Increment volume index within this
			 * raid config.
			 */
			vol++;
		} else if ((elementtype == etype_pd) ||
		    (elementtype == etype_hs) ||
		    (elementtype == etype_oce)) {
			/*
			 * For all other element types, put
			 * their DevHandles in the phys disk
			 * list of the config.  These are all
			 * some variation of a Phys Disk and
			 * this list is used to keep these
			 * disks from going online.
			 */
			raidconfig->m_physdisk_devhdl[disk] = ddi_get16(accessp,
			    &element->PhysDiskDevHandle);

			/*
			 * Increment disk index within this
			 * raid config.
			 */
			disk++;
		}
	}

	return (rval);
}
Example #19
/*
 * SBBC Interrupt Handler
 *
 * Check the SBBC Port Interrupt Status
 * register to verify that its our interrupt.
 * If yes, clear the register.
 *
 * Then read the 'interrupt reason' field from SRAM;
 * this triggers the appropriate soft_intr handler.
 */
uint_t
sbbc_intr_handler(caddr_t arg)
{
	sbbc_softstate_t	*softsp = (sbbc_softstate_t *)arg;
	uint32_t		*port_int_reg;
	volatile uint32_t	port_int_status;
	volatile uint32_t	intr_reason;
	uint32_t		intr_enabled;
	sbbc_intrs_t		*intr;
	int			i, intr_mask;
	struct tunnel_key	tunnel_key;
	ddi_acc_handle_t	intr_in_handle;
	uint32_t		*intr_in_reason;

	if (softsp == (sbbc_softstate_t *)NULL) {

		return (DDI_INTR_UNCLAIMED);
	}

	mutex_enter(&softsp->sbbc_lock);

	if (softsp->port_int_regs == NULL) {
		mutex_exit(&softsp->sbbc_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Normally if port_int_status is 0, we assume it is not
	 * our interrupt.  However, we don't want to miss the
	 * ones that come in during tunnel switch.  Therefore,
	 * we always check the interrupt reason bits in IOSRAM
	 * to be sure.
	 */
	port_int_reg = softsp->port_int_regs;

	port_int_status = ddi_get32(softsp->sbbc_reg_handle1, port_int_reg);

	/*
	 * Generate a softint for each interrupt
	 * bit set in the intr_in_reason field in SRAM
	 * that has a corresponding bit set in the
	 * intr_in_enabled field in SRAM
	 */

	if (iosram_read(SBBC_SC_INTR_ENABLED_KEY, 0,
		(caddr_t)&intr_enabled, sizeof (intr_enabled))) {

		goto intr_handler_exit;
	}

	tunnel_key = master_iosram->tunnel->tunnel_keys[SBBC_SC_INTR_KEY];
	intr_in_reason = (uint32_t *)tunnel_key.base;
	intr_in_handle = tunnel_key.reg_handle;

	intr_reason = ddi_get32(intr_in_handle, intr_in_reason);

	SGSBBC_DBG_INTR(CE_CONT, "intr_reason = %x\n", intr_reason);

	intr_reason &= intr_enabled;

	for (i = 0; i < SBBC_MAX_INTRS; i++) {
		intr_mask = (1 << i);
		if (intr_reason & intr_mask) {
			intr = &softsp->intr_hdlrs[i];
			if ((intr != NULL) &&
				(intr->sbbc_intr_id != 0)) {
				/*
				 * XXXX
				 * The model agreed with a handler
				 * is that it runs until it has
				 * exhausted all work. To avoid
				 * triggering it again, it passes
				 * a state flag and lock when registering.
				 * We check the flag; if the handler is
				 * idle, we trigger.
				 * The interrupt handler should look like:
				 *   intr_func()
				 *	mutex_enter(sbbc_intr_lock);
				 *	sbbc_intr_state = RUNNING;
				 *	mutex_exit(sbbc_intr_lock);
				 *	  ..........
				 *	  ..........
				 *	  ..........
				 *	mutex_enter(sbbc_intr_lock);
				 *	sbbc_intr_state = IDLE;
				 *	mutex_exit(sbbc_intr_lock);
				 *
				 * XXXX
				 */
				mutex_enter(intr->sbbc_intr_lock);
				if (*(intr->sbbc_intr_state) ==
					SBBC_INTR_IDLE) {
					mutex_exit(intr->sbbc_intr_lock);
					ddi_trigger_softintr(
						intr->sbbc_intr_id);
				} else {
					/*
					 * The handler is running
					 */
					mutex_exit(intr->sbbc_intr_lock);
				}
				intr_reason &= ~intr_mask;
				/*
				 * Clear the corresponding reason bit in SRAM
				 *
				 * Since there is no interlocking between
				 * Solaris and the SC when writing to SRAM,
				 * it is possible for the SC to set another
				 * bit in the interrupt reason field while
				 * we are handling the current interrupt.
				 * To minimize the window in which an
				 * additional bit can be set, reading
				 * and writing the interrupt reason
				 * in SRAM must be as close as possible.
				 */
				ddi_put32(intr_in_handle, intr_in_reason,
					ddi_get32(intr_in_handle,
					intr_in_reason) & ~intr_mask);
			}
		}
		if (intr_reason == 0)	/* No more interrupts to be processed */
			break;
	}

	/*
	 * Clear the Interrupt Status Register (RW1C)
	 */
	ddi_put32(softsp->sbbc_reg_handle1, port_int_reg, port_int_status);
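	/*
	 * The read-back below fetches the register just written; presumably
	 * this pushes the RW1C write out to the device before the handler
	 * drops the lock and returns.
	 */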

	port_int_status = ddi_get32(softsp->sbbc_reg_handle1, port_int_reg);

intr_handler_exit:

	mutex_exit(&softsp->sbbc_lock);

	return (DDI_INTR_CLAIMED);

}
Example #20
/**
 * Virtio Pci get queue routine. Allocates a PCI queue and DMA resources.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Where to store the queue.
 *
 * @return An allocated Virtio Pci queue, or NULL in case of errors.
 */
static void *VirtioPciGetQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciGetQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturn(pDevice, NULL);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturn(pPci, NULL);

    /*
     * Select a Queue.
     */
    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);

    /*
     * Get the currently selected Queue's size.
     */
    pQueue->Ring.cDesc = ddi_get16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_NUM));
    if (RT_UNLIKELY(!pQueue->Ring.cDesc))
    {
        LogRel((VIRTIOLOGNAME ": VirtioPciGetQueue: Queue[%d] has no descriptors.\n", pQueue->QueueIndex));
        return NULL;
    }

    /*
     * Check if it's already active.
     */
    uint32_t QueuePFN = ddi_get32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN));
    if (QueuePFN != 0)
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d] is already used.\n", pQueue->QueueIndex));
        return NULL;
    }

    LogFlow(("Queue[%d] has %d slots.\n", pQueue->QueueIndex, pQueue->Ring.cDesc));

    /*
     * Allocate and initialize Pci queue data.
     */
    virtio_pci_queue_t *pPciQueue = RTMemAllocZ(sizeof(virtio_pci_queue_t));
    if (pPciQueue)
    {
        /*
         * Setup DMA.
         */
        size_t cbQueue = VirtioRingSize(pQueue->Ring.cDesc, VIRTIO_PCI_RING_ALIGN);
        int rc = ddi_dma_alloc_handle(pDevice->pDip, &g_VirtioPciDmaAttrRing, DDI_DMA_SLEEP, 0 /* addr */, &pPciQueue->hDMA);
        if (rc == DDI_SUCCESS)
        {
            rc = ddi_dma_mem_alloc(pPciQueue->hDMA, cbQueue, &g_VirtioPciAccAttrRing, DDI_DMA_CONSISTENT,
                                   DDI_DMA_SLEEP, 0 /* addr */, &pQueue->pQueue, &pPciQueue->cbBuf,
                                   &pPciQueue->hIO);
            if (rc == DDI_SUCCESS)
            {
                AssertRelease(pPciQueue->cbBuf >= cbQueue);
                ddi_dma_cookie_t DmaCookie;
                uint_t cCookies;
                rc = ddi_dma_addr_bind_handle(pPciQueue->hDMA, NULL /* addrspace */, pQueue->pQueue, pPciQueue->cbBuf,
                                              DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
                                              0 /* addr */, &DmaCookie, &cCookies);
                if (rc == DDI_SUCCESS)
                {
                    pPciQueue->physBuf = DmaCookie.dmac_laddress;
                    pPciQueue->pageBuf = pPciQueue->physBuf >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

                    LogFlow((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %#x\n", pQueue->QueueIndex,
                             pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf));
                    cmn_err(CE_NOTE, ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %x\n", pQueue->QueueIndex,
                             pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf);

                    /*
                     * Activate the queue and initialize a ring for the queue.
                     */
                    memset(pQueue->pQueue, 0, pPciQueue->cbBuf);
                    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), pPciQueue->pageBuf);
                    VirtioRingInit(pQueue, pQueue->Ring.cDesc, pQueue->pQueue, VIRTIO_PCI_RING_ALIGN);
                    return pPciQueue;
                }
                else
                {
                    /* Remainder of this example is truncated in the source listing. */
                }
Example #21
/*
 * hci1394_ixl_intr_check_done()
 *    checks if context has stopped, or if able to match hardware location
 *    with an expected IXL program location.
 */
static int
hci1394_ixl_intr_check_done(hci1394_state_t *soft_statep,
    hci1394_iso_ctxt_t *ctxtp)
{
	ixl1394_command_t   *ixlp;
	hci1394_xfer_ctl_t  *xferctlp;
	uint_t		    ixldepth;
	hci1394_xfer_ctl_dma_t *dma;
	ddi_acc_handle_t    acc_hdl;
	ddi_dma_handle_t    dma_hdl;
	uint32_t	    desc_status;
	hci1394_desc_t	    *hcidescp;
	off_t		    hcidesc_off;
	int		    err;
	uint32_t	    dma_cmd_cur_loc;
	uint32_t	    dma_cmd_last_loc;
	uint32_t	    dma_loc_check_enabled;
	uint32_t	    dmastartp;
	uint32_t	    dmaendp;

	uint_t		    rem_dma_skips;
	uint16_t	    skipmode;
	uint16_t	    skipdepth;
	ixl1394_command_t   *skipdestp;
	ixl1394_command_t   *skipxferp;

	TNF_PROBE_0_DEBUG(hci1394_ixl_intr_check_done_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	/*
	 * start looking through the IXL list from the xfer start command where
	 * we last left off (for composite opcodes, need to start from the
	 * appropriate depth).
	 */

	ixlp = ctxtp->ixl_execp;
	ixldepth = ctxtp->ixl_exec_depth;

	/* control struct for xfer start IXL command */
	xferctlp = (hci1394_xfer_ctl_t *)ixlp->compiler_privatep;
	dma = &xferctlp->dma[ixldepth];

	/* determine if dma location checking is enabled */
	if ((dma_loc_check_enabled =
	    (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_CMDREG)) != 0) {

		/* if so, get current dma command location */
		dma_cmd_last_loc = 0xFFFFFFFF;

		while ((dma_cmd_cur_loc = HCI1394_ISOCH_CTXT_CMD_PTR(
		    soft_statep, ctxtp)) != dma_cmd_last_loc) {

			/* retry get until location register stabilizes */
			dma_cmd_last_loc = dma_cmd_cur_loc;
		}
	}

	/*
	 * compare the (bound) address of the DMA descriptor corresponding to
	 * the current xfer IXL command against the current value in the
	 * DMA location register.  If exists and if matches, then
	 *    if context stopped, return stopped, else return done.
	 *
	 * The dma start address is the first address of the descriptor block.
	 * Since "Z" is a count of 16-byte descriptors in the block, calculate
	 * the end address by adding Z*16 to the start addr.
	 */
	dmastartp = dma->dma_bound & ~DESC_Z_MASK;
	dmaendp = dmastartp + ((dma->dma_bound & DESC_Z_MASK) << 4);

	if (dma_loc_check_enabled &&
	    ((dma_cmd_cur_loc >= dmastartp) && (dma_cmd_cur_loc < dmaendp))) {

		if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_STOP");
			return (IXL_CHECK_STOP);
		}

		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "CHECK_DONE");
		return (IXL_CHECK_DONE);
	}

	/*
	 * if receive mode:
	 */
	if ((ixlp->ixl_opcode & IXL1394_OPF_ONXMIT) == 0)  {
		/*
		 * if context stopped, return stopped, else,
		 * if there is no current dma location reg, return done
		 * else return location indeterminate
		 */
		if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_STOP");
			return (IXL_CHECK_STOP);
		}
		if (!dma_loc_check_enabled) {
			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_DONE");
			return (IXL_CHECK_DONE);
		}

		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "CHECK_LOST");
		return (IXL_CHECK_LOST);
	}

	/*
	 * else is xmit mode:
	 * check status of current xfer IXL command's dma descriptor
	 */
	acc_hdl  = dma->dma_buf->bi_handle;
	dma_hdl  = dma->dma_buf->bi_dma_handle;
	hcidescp = (hci1394_desc_t *)dma->dma_descp;
	hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;

	/* Sync the descriptor before we get the status */
	err = ddi_dma_sync(dma_hdl, hcidesc_off, sizeof (hci1394_desc_t),
	    DDI_DMA_SYNC_FORCPU);
	if (err != DDI_SUCCESS) {
		TNF_PROBE_1(hci1394_ixl_intr_check_done_error,
		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
		    "dma_sync() failed");
	}
	desc_status = ddi_get32(acc_hdl, &hcidescp->status);

	if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {

		/*
		 * if status is now set here, return skipped, to cause calling
		 * function to continue, even though location hasn't changed
		 */
		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "CHECK_SKIP");
		return (IXL_CHECK_SKIP);
	}

	/*
	 * At this point, we have gotten to a DMA descriptor with an empty
	 * status.  This is not enough information however to determine that
	 * we've found all processed DMA descriptors because during cycle-lost
	 * conditions, the HW will skip over some descriptors without writing
	 * status.  So we have to look ahead until we're convinced that the HW
	 * hasn't jumped ahead.
	 *
	 * Follow the IXL skip-to links until find one whose status is set
	 * or until dma location register (if any) matches an xfer IXL
	 * command's dma location or until have examined max_dma_skips
	 * IXL commands.
	 */
	rem_dma_skips = ctxtp->max_dma_skips;

	while (rem_dma_skips-- > 0) {

		/*
		 * get either IXL command specific or
		 * system default skipmode info
		 */
		skipdepth = 0;
		if (xferctlp->skipmodep != NULL) {
			skipmode  = xferctlp->skipmodep->skipmode;
			skipdestp = xferctlp->skipmodep->label;
			skipxferp = (ixl1394_command_t *)
			    xferctlp->skipmodep->compiler_privatep;
		} else {
			skipmode  = ctxtp->default_skipmode;
			skipdestp = ctxtp->default_skiplabelp;
			skipxferp = ctxtp->default_skipxferp;
		}

		switch (skipmode) {

		case IXL1394_SKIP_TO_SELF:
			/*
			 * mode is skip to self:
			 *   if context is stopped, return stopped, else
			 *   if dma location reg not enabled, return done
			 *   else, return location indeterminate
			 */
			if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
			    0) {
				TNF_PROBE_1_DEBUG(
					hci1394_ixl_intr_check_done_exit,
					HCI1394_TNF_HAL_STACK_ISOCH, "",
					tnf_string, msg, "CHECK_STOP");
				return (IXL_CHECK_STOP);
			}

			if (!dma_loc_check_enabled) {
				TNF_PROBE_1_DEBUG(
					hci1394_ixl_intr_check_done_exit,
					HCI1394_TNF_HAL_STACK_ISOCH, "",
					tnf_string, msg, "CHECK_DONE");
				return (IXL_CHECK_DONE);
			}

			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_LOST");
			return (IXL_CHECK_LOST);

		case IXL1394_SKIP_TO_NEXT:
			/*
			 * mode is skip to next:
			 *    set potential skip target to current command at
			 *    next depth
			 */
			skipdestp = ixlp;
			skipxferp = ixlp;
			skipdepth = ixldepth + 1;

			/*
			 * else if at max depth at current cmd adjust to next
			 * IXL command.
			 *
			 * (NOTE: next means next IXL command along execution
			 * path,  whatever IXL command it might be.  e.g. store
			 * timestamp or callback or label or jump or send... )
			 */
			if (skipdepth >= xferctlp->cnt) {
				skipdepth = 0;
				skipdestp = ixlp->next_ixlp;
				skipxferp = xferctlp->execp;
			}

			/* evaluate skip to status further, below */
			break;


		case IXL1394_SKIP_TO_LABEL:
			/*
			 * mode is skip to label:
			 *    set skip destination depth to 0 (should be
			 *    redundant)
			 */
			skipdepth = 0;

			/* evaluate skip to status further, below */
			break;

		case IXL1394_SKIP_TO_STOP:
			/*
			 * mode is skip to stop:
			 *    set all xfer and destination skip to locations to
			 *    null
			 */
			skipxferp = NULL;
			skipdestp = NULL;
			skipdepth = 0;

			/* evaluate skip to status further, below */
			break;

		} /* end switch */

		/*
		 * if no xfer IXL command follows at or after current skip-to
		 * location
		 */
		if (skipxferp == NULL) {
			/*
			 *   if context is stopped, return stopped, else
			 *   if dma location reg not enabled, return done
			 *   else, return location indeterminate
			 */
			if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
			    0) {
				TNF_PROBE_1_DEBUG(
					hci1394_ixl_intr_check_done_exit,
					HCI1394_TNF_HAL_STACK_ISOCH, "",
					tnf_string, msg, "CHECK_STOP");
				return (IXL_CHECK_STOP);
			}

			if (!dma_loc_check_enabled) {
				TNF_PROBE_1_DEBUG(
					hci1394_ixl_intr_check_done_exit,
					HCI1394_TNF_HAL_STACK_ISOCH, "",
					tnf_string, msg, "CHECK_DONE");
				return (IXL_CHECK_DONE);
			}
			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_LOST");
			return (IXL_CHECK_LOST);
		}

		/*
		 * if the skip to xfer IXL dma descriptor's status is set,
		 * then execution did skip
		 */
		xferctlp = (hci1394_xfer_ctl_t *)skipxferp->compiler_privatep;
		dma	 = &xferctlp->dma[skipdepth];
		acc_hdl  = dma->dma_buf->bi_handle;
		dma_hdl  = dma->dma_buf->bi_dma_handle;
		hcidescp = (hci1394_desc_t *)dma->dma_descp;
		hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;

		/* Sync the descriptor before we get the status */
		err = ddi_dma_sync(dma_hdl, hcidesc_off,
		    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);
		if (err != DDI_SUCCESS) {
			TNF_PROBE_1(hci1394_ixl_intr_check_done_error,
			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
			    "dma_sync() failed");
		}
		desc_status = ddi_get32(acc_hdl, &hcidescp->status);

		if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {

			/*
			 * adjust to continue from skip to IXL command and
			 * return skipped, to have calling func continue.
			 * (Note: next IXL command may be any allowed IXL
			 * command)
			 */
			ctxtp->ixl_execp = skipdestp;
			ctxtp->ixl_exec_depth = skipdepth;

			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_SKIP");
			return (IXL_CHECK_SKIP);
		}

		/*
		 * if dma location command register checking is enabled,
		 * and the skip to xfer IXL dma location matches current
		 * dma location register value, execution did skip
		 */
		dmastartp = dma->dma_bound & ~DESC_Z_MASK;
		dmaendp = dmastartp + ((dma->dma_bound & DESC_Z_MASK) << 4);

		if (dma_loc_check_enabled && ((dma_cmd_cur_loc >= dmastartp) &&
		    (dma_cmd_cur_loc < dmaendp))) {

			/* if the context is stopped, return stopped */
			if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
			    0) {
				TNF_PROBE_1_DEBUG(
					hci1394_ixl_intr_check_done_exit,
					HCI1394_TNF_HAL_STACK_ISOCH, "",
					tnf_string, msg, "CHECK STOP");
				return (IXL_CHECK_STOP);
			}
			/*
			 * adjust to continue from skip to IXL command and
			 * return skipped, to have calling func continue
			 * (Note: next IXL command may be any allowed IXL cmd)
			 */
			ctxtp->ixl_execp = skipdestp;
			ctxtp->ixl_exec_depth = skipdepth;

			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_SKIP");
			return (IXL_CHECK_SKIP);
		}

		/*
		 * else, advance working current locn to skipxferp and
		 * skipdepth and continue skip evaluation loop processing
		 */
		ixlp = skipxferp;
		ixldepth = skipdepth;

	} /* end while */

	/*
	 * didn't find dma status set, nor location reg match, along skip path
	 *
	 * if context is stopped, return stopped,
	 *
	 * else if no current location reg active don't change context values,
	 * just return done (no skip)
	 *
	 * else, return location indeterminate
	 */

	if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "CHECK_STOP");
		return (IXL_CHECK_STOP);
	}
	if (!dma_loc_check_enabled) {
		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "CHECK_DONE");
		return (IXL_CHECK_DONE);
	}

	TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg, "CHECK_LOST");
	return (IXL_CHECK_LOST);
}